--- a/.hgtags Wed Oct 09 17:06:06 2019 -0700
+++ b/.hgtags Fri Oct 11 12:08:01 2019 +0530
@@ -588,3 +588,6 @@
cddef3bde924f3ff4f17f3d369280cf69d0450e5 jdk-14+14
9c250a7600e12bdb1e611835250af3204d4aa152 jdk-13-ga
778fc2dcbdaa8981e07e929a2cacef979c72261e jdk-14+15
+d29f0181ba424a95d881aba5eabf2e393abcc70f jdk-14+16
+5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
+e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18
--- a/make/CreateJmods.gmk Wed Oct 09 17:06:06 2019 -0700
+++ b/make/CreateJmods.gmk Fri Oct 11 12:08:01 2019 +0530
@@ -86,16 +86,18 @@
# from there. These files were explicitly filtered or modified in <module>-copy
# targets. For the rest, just pick up everything from the source legal dirs.
LEGAL_NOTICES := \
- $(SUPPORT_OUTPUTDIR)/modules_legal/common \
+ $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/common) \
$(if $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
$(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
$(call FindModuleLegalSrcDirs, $(MODULE)) \
)
-LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
-DEPS += $(call FindFiles, $(LEGAL_NOTICES))
+ifneq ($(strip $(LEGAL_NOTICES)), )
+ LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
+ DEPS += $(call FindFiles, $(LEGAL_NOTICES))
-JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+ JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+endif
ifeq ($(filter-out jdk.incubator.%, $(MODULE)), )
JMOD_FLAGS += --do-not-resolve-by-default
--- a/make/autoconf/basics.m4 Wed Oct 09 17:06:06 2019 -0700
+++ b/make/autoconf/basics.m4 Fri Oct 11 12:08:01 2019 +0530
@@ -213,8 +213,10 @@
if test "x[$]$1" != x; then
new_path="[$]$1"
- if [ [[ "$new_path" = ~* ]] ]; then
- # Use eval to expand a potential ~
+ # Use eval to expand a potential ~. This technique does not work if there
+ # are spaces in the path (which is valid at this point on Windows), so only
+ # try to apply it if there is an actual ~ first in the path.
+ if [ [[ "$new_path" = "~"* ]] ]; then
eval new_path="$new_path"
if test ! -f "$new_path" && test ! -d "$new_path"; then
AC_MSG_ERROR([The new_path of $1, which resolves as "$new_path", is not found.])
--- a/make/autoconf/version-numbers Wed Oct 09 17:06:06 2019 -0700
+++ b/make/autoconf/version-numbers Fri Oct 11 12:08:01 2019 +0530
@@ -35,7 +35,7 @@
DEFAULT_VERSION_DATE=2020-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=58 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="12 13 14"
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="13 14"
DEFAULT_JDK_SOURCE_TARGET_VERSION=14
DEFAULT_PROMOTED_VERSION_PRE=ea
--- a/make/common/NativeCompilation.gmk Wed Oct 09 17:06:06 2019 -0700
+++ b/make/common/NativeCompilation.gmk Fri Oct 11 12:08:01 2019 +0530
@@ -760,34 +760,6 @@
endif
endif
- # Create a rule to collect all the individual make dependency files into a
- # single makefile.
- $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d
-
- $$($1_DEPS_FILE): $$($1_ALL_OBJS)
- $(RM) $$@
- # CD into dir to reduce risk of hitting command length limits, which
- # could otherwise happen if TOPDIR is a very long path.
- $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
- $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
- # After generating the file, which happens after all objects have been
- # compiled, copy it to .old extension. On the next make invocation, this
- # .old file will be included by make.
- $(CP) $$@.tmp $$@.old
- $(MV) $$@.tmp $$@
-
- $1 += $$($1_DEPS_FILE)
-
- # The include must be on the .old file, which represents the state from the
- # previous invocation of make. The file being included must not have a rule
- # defined for it as otherwise make will think it has to run the rule before
- # being able to include the file, which would be wrong since we specifically
- # need the file as it was generated by a previous make invocation.
- ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
- $1_DEPS_FILE_LOADED := true
- -include $$($1_DEPS_FILE).old
- endif
-
# Now call SetupCompileNativeFile for each source file we are going to compile.
$$(foreach file, $$($1_SRCS), \
$$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\
@@ -850,6 +822,34 @@
endif
endif
+ # Create a rule to collect all the individual make dependency files into a
+ # single makefile.
+ $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d
+
+ $$($1_DEPS_FILE): $$($1_ALL_OBJS) $$($1_RES)
+ $(RM) $$@
+ # CD into dir to reduce risk of hitting command length limits, which
+ # could otherwise happen if TOPDIR is a very long path.
+ $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
+ $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
+ # After generating the file, which happens after all objects have been
+ # compiled, copy it to .old extension. On the next make invocation, this
+ # .old file will be included by make.
+ $(CP) $$@.tmp $$@.old
+ $(MV) $$@.tmp $$@
+
+ $1 += $$($1_DEPS_FILE)
+
+ # The include must be on the .old file, which represents the state from the
+ # previous invocation of make. The file being included must not have a rule
+ # defined for it as otherwise make will think it has to run the rule before
+ # being able to include the file, which would be wrong since we specifically
+ # need the file as it was generated by a previous make invocation.
+ ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
+ $1_DEPS_FILE_LOADED := true
+ -include $$($1_DEPS_FILE).old
+ endif
+
ifneq ($(DISABLE_MAPFILES), true)
$1_REAL_MAPFILE := $$($1_MAPFILE)
ifeq ($(call isTargetOs, windows), false)
--- a/make/conf/jib-profiles.js Wed Oct 09 17:06:06 2019 -0700
+++ b/make/conf/jib-profiles.js Fri Oct 11 12:08:01 2019 +0530
@@ -365,7 +365,7 @@
};
};
- common.boot_jdk_version = "12";
+ common.boot_jdk_version = "13";
common.boot_jdk_build_number = "33";
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
+ common.boot_jdk_version
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk Wed Oct 09 17:06:06 2019 -0700
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk Fri Oct 11 12:08:01 2019 +0530
@@ -73,7 +73,7 @@
($(CD) $(GENSRC_DIR)/META-INF/providers && \
p=""; \
impl=""; \
- for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
+ for i in $$($(NAWK) '$$0=FILENAME" "$$0' * | $(SORT) -k 2 | $(SED) 's/ .*//'); do \
c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
if test x$$p != x$$c; then \
if test x$$p != x; then \
--- a/make/lib/CoreLibraries.gmk Wed Oct 09 17:06:06 2019 -0700
+++ b/make/lib/CoreLibraries.gmk Fri Oct 11 12:08:01 2019 +0530
@@ -23,8 +23,6 @@
# questions.
#
-WIN_VERIFY_LIB := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libverify/verify.lib
-
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, lib/CoreLibraries.gmk))
@@ -110,14 +108,14 @@
LDFLAGS_macosx := -L$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/, \
LDFLAGS_windows := -delayload:shell32.dll, \
LIBS := $(BUILD_LIBFDLIBM_TARGET), \
- LIBS_unix := -ljvm -lverify, \
+ LIBS_unix := -ljvm, \
LIBS_linux := $(LIBDL), \
LIBS_solaris := -lsocket -lnsl -lscf $(LIBDL), \
LIBS_aix := $(LIBDL) $(LIBM),\
LIBS_macosx := -framework CoreFoundation \
-framework Foundation \
-framework SystemConfiguration, \
- LIBS_windows := jvm.lib $(WIN_VERIFY_LIB) \
+ LIBS_windows := jvm.lib \
shell32.lib delayimp.lib \
advapi32.lib version.lib, \
))
--- a/src/hotspot/cpu/aarch64/aarch64.ad Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Fri Oct 11 12:08:01 2019 +0530
@@ -2513,17 +2513,8 @@
__ INSN(REG, as_Register(BASE)); \
}
-typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
-typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
-typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
- MacroAssembler::SIMD_RegVariant T, const Address &adr);
-
- // Used for all non-volatile memory accesses. The use of
- // $mem->opcode() to discover whether this pattern uses sign-extended
- // offsets is something of a kludge.
- static void loadStore(MacroAssembler masm, mem_insn insn,
- Register reg, int opcode,
- Register base, int index, int size, int disp)
+
+static Address mem2address(int opcode, Register base, int index, int size, int disp)
{
Address::extend scale;
@@ -2542,16 +2533,34 @@
}
if (index == -1) {
- (masm.*insn)(reg, Address(base, disp));
+ return Address(base, disp);
} else {
assert(disp == 0, "unsupported address mode: disp = %d", disp);
- (masm.*insn)(reg, Address(base, as_Register(index), scale));
+ return Address(base, as_Register(index), scale);
}
}
+
+typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
+typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
+typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
+typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
+ MacroAssembler::SIMD_RegVariant T, const Address &adr);
+
+ // Used for all non-volatile memory accesses. The use of
+ // $mem->opcode() to discover whether this pattern uses sign-extended
+ // offsets is something of a kludge.
+ static void loadStore(MacroAssembler masm, mem_insn insn,
+ Register reg, int opcode,
+ Register base, int index, int size, int disp)
+ {
+ Address addr = mem2address(opcode, base, index, size, disp);
+ (masm.*insn)(reg, addr);
+ }
+
static void loadStore(MacroAssembler masm, mem_float_insn insn,
- FloatRegister reg, int opcode,
- Register base, int index, int size, int disp)
+ FloatRegister reg, int opcode,
+ Register base, int index, int size, int disp)
{
Address::extend scale;
@@ -2573,8 +2582,8 @@
}
static void loadStore(MacroAssembler masm, mem_vector_insn insn,
- FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
- int opcode, Register base, int index, int size, int disp)
+ FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
+ int opcode, Register base, int index, int size, int disp)
{
if (index == -1) {
(masm.*insn)(reg, T, Address(base, disp));
@@ -3791,7 +3800,7 @@
static const int hi[Op_RegL + 1] = { // enum name
0, // Op_Node
0, // Op_Set
- OptoReg::Bad, // Op_RegN
+ OptoReg::Bad, // Op_RegN
OptoReg::Bad, // Op_RegI
R0_H_num, // Op_RegP
OptoReg::Bad, // Op_RegF
@@ -6923,7 +6932,7 @@
instruct loadP(iRegPNoSp dst, memory mem)
%{
match(Set dst (LoadP mem));
- predicate(!needs_acquiring_load(n));
+ predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $mem\t# ptr" %}
@@ -7616,6 +7625,7 @@
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadP mem));
+ predicate(n->as_Load()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "ldar $dst, $mem\t# ptr" %}
@@ -8552,6 +8562,7 @@
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ predicate(n->as_LoadStore()->barrier_data() == 0);
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
@@ -8665,7 +8676,7 @@
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- predicate(needs_acquiring_load_exclusive(n));
+ predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
@@ -8796,6 +8807,7 @@
%}
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
@@ -8895,7 +8907,7 @@
%}
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- predicate(needs_acquiring_load_exclusive(n));
+ predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
@@ -8996,6 +9008,7 @@
%}
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
@@ -9103,8 +9116,8 @@
%}
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
@@ -9154,6 +9167,7 @@
%}
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
+ predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set prev (GetAndSetP mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
@@ -9197,7 +9211,7 @@
%}
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
- predicate(needs_acquiring_load_exclusive(n));
+ predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set prev (GetAndSetP mem newv));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
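
The barrier_data() predicates added throughout this file make the plain pointer rules step aside whenever a ZGC load barrier is attached to the node, and mem2address() is split out of loadStore() so the ZGC rules can rebuild the same Address for their barrier stubs. A minimal sketch of the matching intent, assuming the accessor names used above:

    // Sketch only: with ZGC off, every LoadP/LoadStore node carries barrier_data() == 0,
    // so the plain rules above keep matching exactly as before. With ZGC on, oop
    // accesses carry ZLoadBarrierStrong/ZLoadBarrierWeak and are matched by the
    // zLoadP / zCompareAndSwapP / zGetAndSetP rules in gc/z/z_aarch64.ad instead.
    static bool plain_ptr_rule_matches(const Node* n) {
      return n->as_Load()->barrier_data() == 0;   // no ZGC barrier attached
    }
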
--- a/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -162,16 +162,12 @@
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
-#ifndef PRODUCT
- NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
- // read the value once
- volatile intptr_t data = method_holder->data();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(data == 0 || jump->jump_destination() == entry,
- "b) MT-unsafe modification of inline cache");
+#ifdef ASSERT
+ NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
+ verify_mt_safe(callee, entry, method_holder, jump);
#endif
+
// Update stub.
method_holder->set_data((intptr_t)callee());
NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);
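
The platform-specific assertion blocks deleted here (and in the arm/ppc hunks further down) are folded into a shared verify_mt_safe() helper that this diff only calls. A sketch of what it presumably checks, reconstructed from the removed assertions; the signature itself is a guess since the shared declaration is not part of this diff:

    static void verify_mt_safe(const methodHandle& callee, address entry,
                               NativeMovConstReg* method_holder, NativeJump* jump) {
      // Read each patched word only once; a concurrent patcher may be racing us.
      volatile intptr_t data = method_holder->data();
      volatile address destination = jump->jump_destination();
      assert(data == 0 || data == (intptr_t)callee(),
             "a) MT-unsafe modification of inline cache");
      assert(destination == (address)-1 || destination == entry,
             "b) MT-unsafe modification of inline cache");
    }
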
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -211,9 +211,14 @@
if (borrow_reg) {
// No free registers available. Make one useful.
tmp = rscratch1;
+ if (tmp == dst) {
+ tmp = rscratch2;
+ }
__ push(RegSet::of(tmp), sp);
}
+ assert_different_registers(tmp, dst);
+
Label done;
__ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ eon(tmp, tmp, zr);
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,22 +24,23 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
+#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
+#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
-
-#include "gc/z/zThreadLocalData.hpp"
-
-ZBarrierSetAssembler::ZBarrierSetAssembler() :
- _load_barrier_slow_stub(),
- _load_barrier_weak_slow_stub() {}
+#ifdef COMPILER2
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif // COMPILER2
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -66,7 +67,7 @@
assert_different_registers(rscratch1, rscratch2, src.base());
assert_different_registers(rscratch1, rscratch2, dst);
- RegSet savedRegs = RegSet::range(r0,r28) - RegSet::of(dst, rscratch1, rscratch2);
+ RegSet savedRegs = RegSet::range(r0, r28) - RegSet::of(dst, rscratch1, rscratch2);
Label done;
@@ -206,7 +207,8 @@
// The Address offset is too large to direct load - -784. Our range is +127, -128.
__ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
- in_bytes(JavaThread::jni_environment_offset())));
+ in_bytes(JavaThread::jni_environment_offset())));
+
// Load address bad mask
__ add(tmp, jni_env, tmp);
__ ldr(tmp, Address(tmp));
@@ -294,12 +296,12 @@
__ prologue("zgc_load_barrier stub", false);
// We don't use push/pop_clobbered_registers() - we need to pull out the result from r0.
- for (int i = 0; i < 32; i +=2) {
- __ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16)));
+ for (int i = 0; i < 32; i += 2) {
+ __ stpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ pre(sp,-16)));
}
- RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0);
- __ push(saveRegs, sp);
+ const RegSet save_regs = RegSet::range(r1, r28);
+ __ push(save_regs, sp);
// Setup arguments
__ load_parameter(0, c_rarg0);
@@ -307,98 +309,161 @@
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
- __ pop(saveRegs, sp);
+ __ pop(save_regs, sp);
- for (int i = 30; i >0; i -=2) {
- __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16)));
- }
+ for (int i = 30; i >= 0; i -= 2) {
+ __ ldpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ post(sp, 16)));
+ }
__ epilogue();
}
#endif // COMPILER1
+#ifdef COMPILER2
+
+OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
+ if (!OptoReg::is_reg(opto_reg)) {
+ return OptoReg::Bad;
+ }
+
+ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+ if (vm_reg->is_FloatRegister()) {
+ return opto_reg & ~1;
+ }
+
+ return opto_reg;
+}
+
#undef __
-#define __ cgen->assembler()->
+#define __ _masm->
+
+class ZSaveLiveRegisters {
+private:
+ MacroAssembler* const _masm;
+ RegSet _gp_regs;
+ RegSet _fp_regs;
+
+public:
+ void initialize(ZLoadBarrierStubC2* stub) {
+ // Create mask of live registers
+ RegMask live = stub->live();
-// Generates a register specific stub for calling
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-//
-// The raddr register serves as both input and output for this stub. When the stub is
-// called the raddr register contains the object field address (oop*) where the bad oop
-// was loaded from, which caused the slow path to be taken. On return from the stub the
-// raddr register contains the good/healed oop returned from
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
- // Don't generate stub for invalid registers
- if (raddr == zr || raddr == r29 || raddr == r30) {
- return NULL;
+    // Record registers that need to be saved/restored
+ while (live.is_NotEmpty()) {
+ const OptoReg::Name opto_reg = live.find_first_elem();
+ live.Remove(opto_reg);
+ if (OptoReg::is_reg(opto_reg)) {
+ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+ if (vm_reg->is_Register()) {
+ _gp_regs += RegSet::of(vm_reg->as_Register());
+ } else if (vm_reg->is_FloatRegister()) {
+ _fp_regs += RegSet::of((Register)vm_reg->as_FloatRegister());
+ } else {
+ fatal("Unknown register type");
+ }
+ }
+ }
+
+ // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
+ _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
+ }
+
+ ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+ _masm(masm),
+ _gp_regs(),
+ _fp_regs() {
+
+ // Figure out what registers to save/restore
+ initialize(stub);
+
+ // Save registers
+ __ push(_gp_regs, sp);
+ __ push_fp(_fp_regs, sp);
}
- // Create stub name
- char name[64];
- const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
- os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+ ~ZSaveLiveRegisters() {
+ // Restore registers
+ __ pop_fp(_fp_regs, sp);
+ __ pop(_gp_regs, sp);
+ }
+};
+
+#undef __
+#define __ _masm->
- __ align(CodeEntryAlignment);
- StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
- address start = __ pc();
+class ZSetupArguments {
+private:
+ MacroAssembler* const _masm;
+ const Register _ref;
+ const Address _ref_addr;
+
+public:
+ ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+ _masm(masm),
+ _ref(stub->ref()),
+ _ref_addr(stub->ref_addr()) {
- // Save live registers
- RegSet savedRegs = RegSet::range(r0,r18) - RegSet::of(raddr);
-
- __ enter();
- __ push(savedRegs, sp);
-
- // Setup arguments
- if (raddr != c_rarg1) {
- __ mov(c_rarg1, raddr);
+ // Setup arguments
+ if (_ref_addr.base() == noreg) {
+ // No self healing
+ if (_ref != c_rarg0) {
+ __ mov(c_rarg0, _ref);
+ }
+ __ mov(c_rarg1, 0);
+ } else {
+ // Self healing
+ if (_ref == c_rarg0) {
+ // _ref is already at correct place
+ __ lea(c_rarg1, _ref_addr);
+ } else if (_ref != c_rarg1) {
+ // _ref is in wrong place, but not in c_rarg1, so fix it first
+ __ lea(c_rarg1, _ref_addr);
+ __ mov(c_rarg0, _ref);
+ } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
+ assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
+ __ mov(c_rarg0, _ref);
+ __ lea(c_rarg1, _ref_addr);
+ } else {
+ assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
+ if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
+ __ mov(rscratch2, c_rarg1);
+ __ lea(c_rarg1, _ref_addr);
+ __ mov(c_rarg0, rscratch2);
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+ }
}
- __ ldr(c_rarg0, Address(raddr));
+ ~ZSetupArguments() {
+ // Transfer result
+ if (_ref != r0) {
+ __ mov(_ref, r0);
+ }
+ }
+};
+
+#undef __
+#define __ masm->
- // Call barrier function
- __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
+ BLOCK_COMMENT("ZLoadBarrierStubC2");
+
+ // Stub entry
+ __ bind(*stub->entry());
- // Move result returned in r0 to raddr, if needed
- if (raddr != r0) {
- __ mov(raddr, r0);
+ {
+ ZSaveLiveRegisters save_live_registers(masm, stub);
+ ZSetupArguments setup_arguments(masm, stub);
+ __ mov(rscratch1, stub->slow_path());
+ __ blr(rscratch1);
}
- __ pop(savedRegs, sp);
- __ leave();
- __ ret(lr);
-
- return start;
+ // Stub exit
+ __ b(*stub->continuation());
}
#undef __
-static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
- const int nregs = 28; // Exclude FP, XZR, SP from calculation.
- const int code_size = nregs * 254; // Rough estimate of code size
-
- ResourceMark rm;
-
- CodeBuffer buf(BufferBlob::create(label, code_size));
- StubCodeGenerator cgen(&buf);
-
- for (int i = 0; i < nregs; i++) {
- const Register reg = as_Register(i);
- stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
- }
-}
-
-void ZBarrierSetAssembler::barrier_stubs_init() {
- barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
- barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
-}
-
-address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
- return _load_barrier_slow_stub[reg->encoding()];
-}
-
-address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
- return _load_barrier_weak_slow_stub[reg->encoding()];
-}
+#endif // COMPILER2
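
At runtime the stub emitted by generate_c2_load_barrier_stub() behaves roughly like the following C++, under the assumption that stub->slow_path() resolves to one of the ZBarrierSetRuntime entry points named above; the function and typedef names here are illustrative only:

    #include <cstdint>

    // Illustrative stand-in for stub->slow_path(); not a real HotSpot typedef.
    typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t ref, uintptr_t* ref_addr);

    static uintptr_t c2_load_barrier_stub_effect(ZBarrierSlowPath slow_path,
                                                 uintptr_t ref,
                                                 uintptr_t* ref_addr_or_null) {
      // ZSaveLiveRegisters: live GP regs (minus r19-r30, r8, r9 and the ref
      // register itself) and live FP regs are pushed here, popped on the way out.
      // ZSetupArguments: c_rarg0 = ref, c_rarg1 = field address, or 0 when no
      // self healing is wanted.
      uintptr_t healed = slow_path(ref, ref_addr_or_null);  // __ blr(rscratch1)
      // ~ZSetupArguments: the healed oop returned in r0 is moved back into ref.
      return healed;                                         // __ b(*stub->continuation())
    }
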
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,6 +24,12 @@
#ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
+#include "code/vmreg.hpp"
+#include "oops/accessDecorators.hpp"
+#ifdef COMPILER2
+#include "opto/optoreg.hpp"
+#endif // COMPILER2
+
#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
@@ -32,14 +38,13 @@
class ZLoadBarrierStubC1;
#endif // COMPILER1
+#ifdef COMPILER2
+class Node;
+class ZLoadBarrierStubC2;
+#endif // COMPILER2
+
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
-private:
- address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
- address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
-
public:
- ZBarrierSetAssembler();
-
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
@@ -83,10 +88,13 @@
DecoratorSet decorators) const;
#endif // COMPILER1
- virtual void barrier_stubs_init();
+#ifdef COMPILER2
+ OptoReg::Name refine_register(const Node* node,
+ OptoReg::Name opto_reg);
- address load_barrier_slow_stub(Register reg);
- address load_barrier_weak_slow_stub(Register reg);
+ void generate_c2_load_barrier_stub(MacroAssembler* masm,
+ ZLoadBarrierStubC2* stub) const;
+#endif // COMPILER2
};
#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -40,7 +40,7 @@
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
@@ -75,7 +75,7 @@
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
@@ -110,7 +110,7 @@
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -36,7 +36,6 @@
// ------------------------------------------------------------------
//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
-const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Fri Oct 11 12:08:01 2019 +0530
@@ -24,155 +24,244 @@
source_hpp %{
#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
%}
source %{
-#include "gc/z/zBarrierSetAssembler.hpp"
-
-static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
- Register base, int index, int scale,
- int disp, bool weak) {
- const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
- : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+ __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(tmp, tmp, ref);
+ __ cbnz(tmp, *stub->entry());
+ __ bind(*stub->continuation());
+}
- if (index == -1) {
- if (disp != 0) {
- __ lea(dst, Address(base, disp));
- } else {
- __ mov(dst, base);
- }
- } else {
- Register index_reg = as_Register(index);
- if (disp == 0) {
- __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
- } else {
- __ lea(dst, Address(base, disp));
- __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
- }
- }
-
- __ far_call(RuntimeAddress(stub));
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+ __ b(*stub->entry());
+ __ bind(*stub->continuation());
}
%}
-//
-// Execute ZGC load barrier (strong) slow path
-//
-instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
- vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
- vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
- vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
- vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
- vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
- vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
- vRegD_V30 v30, vRegD_V31 v31) %{
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(!n->as_LoadBarrierSlowReg()->is_weak());
+// Load Pointer
+instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong));
+ effect(TEMP dst, KILL cr);
- effect(KILL cr,
- KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
- KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
- KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
- KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
- KILL v29, KILL v30, KILL v31);
+ ins_cost(4 * INSN_COST);
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
+ format %{ "ldr $dst, $mem" %}
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
- $src$$index, $src$$scale, $src$$disp, false);
+ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+ __ ldr($dst$$Register, ref_addr);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
%}
- ins_pipe(pipe_slow);
+
+ ins_pipe(iload_reg_mem);
%}
-//
-// Execute ZGC load barrier (weak) slow path
-//
-instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
- vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
- vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
- vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
- vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
- vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
- vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
- vRegD_V30 v30, vRegD_V31 v31) %{
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(n->as_LoadBarrierSlowReg()->is_weak());
+// Load Weak Pointer
+instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak));
+ effect(TEMP dst, KILL cr);
- effect(KILL cr,
- KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
- KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
- KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
- KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
- KILL v29, KILL v30, KILL v31);
+ ins_cost(4 * INSN_COST);
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
+ format %{ "ldr $dst, $mem" %}
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
- $src$$index, $src$$scale, $src$$disp, true);
+ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+ __ ldr($dst$$Register, ref_addr);
+ z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */);
%}
- ins_pipe(pipe_slow);
+
+ ins_pipe(iload_reg_mem);
%}
+// Load Pointer Volatile
+instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP dst, KILL cr);
-// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
-// but doesn't affect output.
+ ins_cost(VOLATILE_REF_COST);
-instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
- iRegP oldval, iRegP newval, iRegP keepalive,
- rFlagsReg cr) %{
- match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
- ins_cost(2 * VOLATILE_REF_COST);
- effect(TEMP_DEF res, KILL cr);
- format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+ format %{ "ldar $dst, $mem\t" %}
+
+ ins_encode %{
+ __ ldar($dst$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
%}
- ins_encode %{
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
- Assembler::xword, /*acquire*/ false, /*release*/ true,
- /*weak*/ false, $res$$Register);
- %}
- ins_pipe(pipe_slow);
+
+ ins_pipe(pipe_serial);
%}
-instruct z_compareAndSwapP(iRegINoSp res,
- indirect mem,
- iRegP oldval, iRegP newval, iRegP keepalive,
- rFlagsReg cr) %{
-
- match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
- match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
+instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, TEMP_DEF res);
ins_cost(2 * VOLATILE_REF_COST);
- effect(KILL cr);
+ format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+ "cset $res, EQ" %}
- format %{
- "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
- "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
- %}
-
- ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
- aarch64_enc_cset_eq(res));
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, rscratch2);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ __ bind(good);
+ }
+ %}
ins_pipe(pipe_slow);
%}
+instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+ effect(KILL cr, TEMP_DEF res);
-instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
- iRegP keepalive) %{
- match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+ "cset $res, EQ" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, rscratch2);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF res, KILL cr);
ins_cost(2 * VOLATILE_REF_COST);
- format %{ "atomic_xchg $prev, $newv, [$mem]" %}
+
+ format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, $res$$Register);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF res, KILL cr);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
ins_encode %{
- __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, $res$$Register);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+ match(Set prev (GetAndSetP mem newv));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF prev, KILL cr);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "atomic_xchg $prev, $newv, [$mem]" %}
+
+ ins_encode %{
+ __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(pipe_serial);
+%}
+
+instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+ match(Set prev (GetAndSetP mem newv));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+ effect(TEMP_DEF prev, KILL cr);
+
+ ins_cost(VOLATILE_REF_COST);
+
+ format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
+
+ ins_encode %{
+ __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
%}
ins_pipe(pipe_serial);
%}
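
The fast path emitted by z_load_barrier() above is just a mask test on the loaded reference; expressed as plain C++ it amounts to the sketch below (field name taken from ZThreadLocalData above). The cmpxchg and xchg rules apply the same test to the value they read back before deciding whether to re-heal and retry.

    #include <cstdint>

    // Sketch of the fast-path test:
    //   ldr  tmp, [rthread, #address_bad_mask_offset]
    //   andr tmp, tmp, ref
    //   cbnz tmp, <stub entry>   // slow path heals ref, then falls through to continuation
    static inline bool z_reference_needs_barrier(uintptr_t ref, uintptr_t address_bad_mask) {
      return (ref & address_bad_mask) != 0;
    }
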
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -2132,6 +2132,65 @@
return count;
}
+
+// Push lots of registers in the bit set supplied. Don't push sp.
+// Return the number of words pushed
+int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
+ int words_pushed = 0;
+
+ // Scan bitset to accumulate register pairs
+ unsigned char regs[32];
+ int count = 0;
+ for (int reg = 0; reg <= 31; reg++) {
+ if (1 & bitset)
+ regs[count++] = reg;
+ bitset >>= 1;
+ }
+ regs[count++] = zr->encoding_nocheck();
+ count &= ~1; // Only push an even number of regs
+
+ // Always pushing full 128 bit registers.
+ if (count) {
+ stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
+ words_pushed += 2;
+ }
+ for (int i = 2; i < count; i += 2) {
+ stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+ words_pushed += 2;
+ }
+
+ assert(words_pushed == count, "oops, pushed != count");
+ return count;
+}
+
+int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
+ int words_pushed = 0;
+
+ // Scan bitset to accumulate register pairs
+ unsigned char regs[32];
+ int count = 0;
+ for (int reg = 0; reg <= 31; reg++) {
+ if (1 & bitset)
+ regs[count++] = reg;
+ bitset >>= 1;
+ }
+ regs[count++] = zr->encoding_nocheck();
+ count &= ~1;
+
+ for (int i = 2; i < count; i += 2) {
+ ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+ words_pushed += 2;
+ }
+ if (count) {
+ ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
+ words_pushed += 2;
+ }
+
+ assert(words_pushed == count, "oops, pushed != count");
+
+ return count;
+}
+
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
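
push_fp()/pop_fp() first flatten the bitset into an even-length list so each stpq/ldpq always gets a full register pair; the scan in isolation behaves like this standalone sketch (zr's encoding is modelled as 31 here):

    #include <cstdio>

    // Standalone sketch of the bitset scan used by push_fp()/pop_fp(): collect the
    // set register numbers, pad with the zr encoding and round down to an even count.
    static int collect_fp_pairs(unsigned int bitset, unsigned char regs[33]) {
      int count = 0;
      for (int reg = 0; reg <= 31; reg++) {
        if (bitset & 1) regs[count++] = (unsigned char)reg;
        bitset >>= 1;
      }
      regs[count++] = 31;   // stands in for zr->encoding_nocheck()
      return count & ~1;    // only ever push an even number of registers
    }

    int main() {
      unsigned char regs[33];                                               // one spare slot for the pad
      int count = collect_fp_pairs((1u << 0) | (1u << 5) | (1u << 7), regs); // v0, v5, v7
      for (int i = 0; i < count; i += 2)
        printf("stpq v%d, v%d\n", regs[i], regs[i + 1]);                    // v0/v5, then v7/v31 (pad)
      return 0;
    }
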
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -442,12 +442,18 @@
int push(unsigned int bitset, Register stack);
int pop(unsigned int bitset, Register stack);
+ int push_fp(unsigned int bitset, Register stack);
+ int pop_fp(unsigned int bitset, Register stack);
+
void mov(Register dst, Address a);
public:
void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
+ void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
+ void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
+
// Push and pop everything that might be clobbered by a native
// runtime call except rscratch1 and rscratch2. (They are always
// scratch, so we don't have to protect them.) Only save the lower
--- a/src/hotspot/cpu/aarch64/register_aarch64.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -230,6 +230,11 @@
return *this;
}
+ RegSet &operator-=(const RegSet aSet) {
+ *this = *this - aSet;
+ return *this;
+ }
+
static RegSet of(Register r1) {
return RegSet(r1);
}
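
The new operator-= is shorthand for the existing operator- plus assignment; the C2 barrier code above uses it to trim a live set, along these lines (usage sketch, relying on the surrounding HotSpot declarations):

    // Start from everything that might be live, then strip callee-saved and
    // scratch registers before pushing - mirrors ZSaveLiveRegisters::initialize().
    RegSet gp_regs = RegSet::range(r0, r28);
    gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);
    __ push(gp_regs, sp);
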
--- a/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -27,6 +27,7 @@
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,16 +115,7 @@
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
- // read the value once
- volatile intptr_t data = method_holder->data();
- volatile address destination = jump->jump_destination();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-#endif
+ verify_mt_safe(callee, entry, method_holder, jump);
// Update stub.
method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/debug.hpp"
--- a/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
//}
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
opr = as_long_opr(reg);
- } else if (type == T_OBJECT || type == T_ARRAY) {
+ } else if (is_reference_type(type)) {
opr = as_oop_opr(reg);
} else {
opr = as_opr(reg);
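
This hunk and several of the ppc/s390 hunks below mechanically replace the open-coded T_OBJECT/T_ARRAY tests with is_reference_type(); judging only from the expressions it replaces, the shared helper is presumably equivalent to:

    // Presumed shape of the shared helper (its definition lives outside this diff).
    inline bool is_reference_type(BasicType type) {
      return type == T_OBJECT || type == T_ARRAY;
    }
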
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1237,7 +1237,7 @@
} else {
ShouldNotReachHere();
}
- if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
+ if (is_reference_type(to_reg->type())) {
__ verify_oop(to_reg->as_register());
}
}
@@ -1253,7 +1253,7 @@
Register disp_reg = noreg;
int disp_value = addr->disp();
bool needs_patching = (patch_code != lir_patch_none);
- bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
+ bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
@@ -1473,7 +1473,7 @@
}
} else {
assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
- if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+ if (is_reference_type(opr1->type())) {
// There are only equal/notequal comparisons on objects.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
__ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
@@ -2315,8 +2315,8 @@
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
if (UseSlowPath ||
- (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
- (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
+ (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
+ (!UseFastNewTypeArray && (!is_reference_type(op->type())))) {
__ b(*op->stub()->entry());
} else {
__ allocate_array(op->obj()->as_register(),
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -648,7 +648,7 @@
__ membar_release();
}
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
if (UseCompressedOops) {
t1 = new_register(T_OBJECT);
t2 = new_register(T_OBJECT);
--- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -82,9 +82,9 @@
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(uintx, ReservedCodeCacheSize, 256*M);
-define_pd_global(uintx, NonProfiledCodeHeapSize, 125*M);
-define_pd_global(uintx, ProfiledCodeHeapSize, 126*M);
+define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -178,15 +178,7 @@
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-#ifdef ASSERT
- // read the value once
- volatile intptr_t data = method_holder->data();
- volatile address destination = jump->jump_destination();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-#endif
+ verify_mt_safe(callee, entry, method_holder, jump);
// Update stub.
method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -305,7 +305,7 @@
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2, bool needs_frame, Label *L_handle_null) {
- bool on_oop = type == T_OBJECT || type == T_ARRAY;
+ bool on_oop = is_reference_type(type);
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
--- a/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
oop_store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
} else {
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
--- a/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1142,7 +1142,7 @@
}
if (!r_2->is_valid()) {
// Not sure we need to do this but it shouldn't hurt.
- if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
+ if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
__ ld(r, ld_offset, ld_ptr);
ld_offset-=wordSize;
} else {
@@ -1739,8 +1739,7 @@
Register temp_reg = R19_method; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < method->size_of_parameters(); i++) {
- if (sig_bt[i] == T_OBJECT ||
- sig_bt[i] == T_ARRAY) {
+ if (is_reference_type(sig_bt[i])) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
@@ -2602,7 +2601,7 @@
// Unbox oop result, e.g. JNIHandles::resolve value.
// --------------------------------------------------------------------------
- if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
+ if (is_reference_type(ret_type)) {
__ resolve_jobject(R3_RET, r_temp_1, r_temp_2, /* needs_frame */ false);
}
--- a/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/debug.hpp"
--- a/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
Register reg = r_1->as_Register();
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
opr = as_long_opr(reg);
- } else if (type == T_OBJECT || type == T_ARRAY) {
+ } else if (is_reference_type(type)) {
opr = as_oop_opr(reg);
} else if (type == T_METADATA) {
opr = as_metadata_opr(reg);
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -972,6 +972,7 @@
} else {
__ z_lg(dest->as_register(), disp_value, disp_reg, src);
}
+ __ verify_oop(dest->as_register());
break;
}
case T_FLOAT:
@@ -991,9 +992,6 @@
case T_LONG : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
default : ShouldNotReachHere();
}
- if (type == T_ARRAY || type == T_OBJECT) {
- __ verify_oop(dest->as_register());
- }
if (patch != NULL) {
patching_epilog(patch, patch_code, src, info);
@@ -1006,7 +1004,7 @@
assert(dest->is_register(), "should not call otherwise");
if (dest->is_single_cpu()) {
- if (type == T_ARRAY || type == T_OBJECT) {
+ if (is_reference_type(type)) {
__ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
__ verify_oop(dest->as_register());
} else if (type == T_METADATA) {
@@ -1034,7 +1032,7 @@
if (src->is_single_cpu()) {
const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
__ verify_oop(src->as_register());
__ reg2mem_opt(src->as_register(), dst, true);
} else if (type == T_METADATA) {
@@ -1080,7 +1078,7 @@
} else {
ShouldNotReachHere();
}
- if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
+ if (is_reference_type(to_reg->type())) {
__ verify_oop(to_reg->as_register());
}
}
@@ -1131,7 +1129,7 @@
assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
- if (type == T_ARRAY || type == T_OBJECT) {
+ if (is_reference_type(type)) {
__ verify_oop(from->as_register());
}
@@ -1294,10 +1292,10 @@
Register reg1 = opr1->as_register();
if (opr2->is_single_cpu()) {
// cpu register - cpu register
- if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+ if (is_reference_type(opr1->type())) {
__ z_clgr(reg1, opr2->as_register());
} else {
- assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
+ assert(!is_reference_type(opr2->type()), "cmp int, oop?");
if (unsigned_comp) {
__ z_clr(reg1, opr2->as_register());
} else {
@@ -1306,7 +1304,7 @@
}
} else if (opr2->is_stack()) {
// cpu register - stack
- if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+ if (is_reference_type(opr1->type())) {
__ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
} else {
if (unsigned_comp) {
@@ -1324,7 +1322,7 @@
} else {
__ z_cfi(reg1, c->as_jint());
}
- } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+ } else if (is_reference_type(c->type())) {
// In 64bit oops are single register.
jobject o = c->as_jobject();
if (o == NULL) {
@@ -1767,7 +1765,7 @@
}
} else {
Register r_lo;
- if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
+ if (is_reference_type(right->type())) {
r_lo = right->as_register();
} else {
r_lo = right->as_register_lo();
@@ -2413,8 +2411,8 @@
__ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend
if (UseSlowPath ||
- (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
- (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
+ (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
+ (!UseFastNewTypeArray && (!is_reference_type(op->type())))) {
__ z_brul(*op->stub()->entry());
} else {
__ allocate_array(op->obj()->as_register(),
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -104,19 +104,7 @@
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
- // A generated lambda form might be deleted from the Lambdaform
- // cache in MethodTypeForm. If a jit compiled lambdaform method
- // becomes not entrant and the cache access returns null, the new
- // resolve will lead to a new generated LambdaForm.
- volatile intptr_t data = method_holder->data();
- volatile address destination = jump->jump_destination();
- assert(data == 0 || data == (intptr_t)callee() || callee->is_compiled_lambda_form(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-#endif
+ verify_mt_safe(callee, entry, method_holder, jump);
// Update stub.
method_holder->set_data((intptr_t)callee(), relocInfo::metadata_type);
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,7 +100,7 @@
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
- bool on_oop = type == T_OBJECT || type == T_ARRAY;
+ bool on_oop = is_reference_type(type);
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
--- a/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,14 +36,14 @@
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, bool do_return) {
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
gen_write_ref_array_post_barrier(masm, decorators, dst, count, do_return);
} else {
if (do_return) { __ z_br(Z_R14); }
@@ -52,7 +52,7 @@
void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (is_reference_type(type)) {
oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
} else {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -883,7 +883,7 @@
if (!VerifyOops) { return; }
for (int i = 0; i < total_args_passed; i++) {
- if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
+ if (is_reference_type(sig_bt[i])) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
@@ -2318,7 +2318,7 @@
__ reset_last_Java_frame();
// Unpack oop result, e.g. JNIHandles::resolve result.
- if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
+ if (is_reference_type(ret_type)) {
__ resolve_jobject(Z_RET, /* tmp1 */ Z_R13, /* tmp2 */ Z_R7);
}
@@ -2621,7 +2621,7 @@
} else {
if (!r_2->is_valid()) {
// Not sure we need to do this but it shouldn't hurt.
- if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
+ if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
__ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
} else {
__ z_l(r_1->as_Register(), ld_offset, ld_ptr);
--- a/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -104,16 +104,7 @@
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
- // read the value once
- volatile intptr_t data = method_holder->data();
- volatile address destination = jump->jump_destination();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-#endif
+ verify_mt_safe(callee, entry, method_holder, jump);
// Update stub.
method_holder->set_data((intptr_t)callee());
--- a/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "interpreter/interpreter.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/frame.inline.hpp"
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -4742,6 +4742,25 @@
emit_int8((unsigned char)0xA5);
}
+void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
+ assert(VM_Version::supports_sse4_1(), "");
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+ int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8(0x0B);
+ emit_int8((unsigned char)(0xC0 | encode));
+ emit_int8((unsigned char)rmode);
+}
+
+void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
+ assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+ simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8(0x0B);
+ emit_operand(dst, src);
+ emit_int8((unsigned char)rmode);
+}
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -5539,6 +5558,49 @@
emit_operand(dst, src);
}
+void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
+ assert(VM_Version::supports_avx(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8(0x09);
+ emit_int8((unsigned char)(0xC0 | encode));
+ emit_int8((unsigned char)(rmode));
+}
+
+void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
+ assert(VM_Version::supports_avx(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8(0x09);
+ emit_operand(dst, src);
+ emit_int8((unsigned char)(rmode));
+}
+
+void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
+ assert(VM_Version::supports_evex(), "requires EVEX support");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8((unsigned char)0x09);
+ emit_int8((unsigned char)(0xC0 | encode));
+ emit_int8((unsigned char)(rmode));
+}
+
+void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
+ assert(VM_Version::supports_evex(), "requires EVEX support");
+ assert(dst != xnoreg, "sanity");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8((unsigned char)0x09);
+ emit_operand(dst, src);
+ emit_int8((unsigned char)(rmode));
+}
+
+
void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_avx(), "");
InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1856,6 +1856,9 @@
void sqrtsd(XMMRegister dst, Address src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void roundsd(XMMRegister dst, Address src, int32_t rmode);
+ void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode);
+
// Compute Square Root of Scalar Single-Precision Floating-Point Value
void sqrtss(XMMRegister dst, Address src);
void sqrtss(XMMRegister dst, XMMRegister src);
@@ -2020,6 +2023,12 @@
void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
void vsqrtps(XMMRegister dst, Address src, int vector_len);
+ // Round Packed Double precision value.
+ void vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
+ void vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len);
+ void vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
+ void vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len);
+
// Bitwise Logical AND of Packed Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
void andps(XMMRegister dst, XMMRegister src);
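
A hedged usage sketch for the new scalar/packed rounding emitters declared above; the helper function and call sites are illustrative only, not part of this patch. The imm8 follows the SSE4.1/AVX ROUNDSD/VROUNDPD encoding: bits 1:0 pick the mode (0 nearest, 1 floor, 2 ceil, 3 truncate) and bit 3 suppresses the precision exception.

#define __ masm->
// Illustrative helper (hypothetical name) showing how the emitters compose.
static void emit_rounding_example(MacroAssembler* masm) {
  __ roundsd(xmm0, xmm1, 0x9);                           // xmm0 = floor(xmm1), inexact suppressed
  __ roundsd(xmm0, xmm1, 0xB);                           // xmm0 = trunc(xmm1)
  __ vroundpd(xmm2, xmm3, 0xA, Assembler::AVX_256bit);   // ceil of 4 packed doubles
}
#undef __
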
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -157,16 +157,7 @@
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-
-#ifdef ASSERT
- Method* old_method = reinterpret_cast<Method*>(method_holder->data());
- address destination = jump->jump_destination();
- assert(old_method == NULL || old_method == callee() ||
- !old_method->method_holder()->is_loader_alive(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-#endif
+ verify_mt_safe(callee, entry, method_holder, jump);
// Update stub.
method_holder->set_data((intptr_t)callee());
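
The per-platform #ifdef ASSERT blocks removed in the compiledIC files above are folded into a shared verify_mt_safe() helper. A hedged reconstruction of the checks it centralizes, based only on the x86 block deleted here; the real shared helper may carry extra cases, such as the compiled-lambda-form check visible in the s390 block above.

#ifdef ASSERT
// Reconstruction from the deleted platform copies, not the actual shared code.
static void verify_mt_safe(const methodHandle& callee, address entry,
                           NativeMovConstReg* method_holder, NativeJump* jump) {
  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
  address destination = jump->jump_destination();
  assert(old_method == NULL || old_method == callee() ||
         !old_method->method_holder()->is_loader_alive(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
}
#endif
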
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -271,9 +271,14 @@
if (borrow_reg) {
// No free registers available. Make one useful.
tmp = LP64_ONLY(rscratch1) NOT_LP64(rdx);
+ if (tmp == dst) {
+ tmp = LP64_ONLY(rscratch2) NOT_LP64(rcx);
+ }
__ push(tmp);
}
+ assert_different_registers(dst, tmp);
+
Label done;
__ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ notptr(tmp);
--- a/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -23,20 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/debug.hpp"
void ZArguments::initialize_platform() {
-#ifdef COMPILER2
- // The C2 barrier slow path expects vector registers to be least
- // 16 bytes wide, which is the minimum width available on all
- // x86-64 systems. However, the user could have speficied a lower
- // number on the command-line, in which case we print a warning
- // and raise it to 16.
- if (MaxVectorSize < 16) {
- warning("ZGC requires MaxVectorSize to be at least 16");
- FLAG_SET_DEFAULT(MaxVectorSize, 16);
- }
-#endif
+ // Does nothing
}
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,22 +24,22 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
+#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
-
-ZBarrierSetAssembler::ZBarrierSetAssembler() :
- _load_barrier_slow_stub(),
- _load_barrier_weak_slow_stub() {}
+#ifdef COMPILER2
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif // COMPILER2
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -344,137 +344,327 @@
#endif // COMPILER1
+#ifdef COMPILER2
+
+OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
+ if (!OptoReg::is_reg(opto_reg)) {
+ return OptoReg::Bad;
+ }
+
+ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+ if (vm_reg->is_XMMRegister()) {
+ opto_reg &= ~15;
+ switch (node->ideal_reg()) {
+ case Op_VecX:
+ opto_reg |= 2;
+ break;
+ case Op_VecY:
+ opto_reg |= 4;
+ break;
+ case Op_VecZ:
+ opto_reg |= 8;
+ break;
+ default:
+ opto_reg |= 1;
+ break;
+ }
+ }
+
+ return opto_reg;
+}
+
+// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
+extern int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+ int stack_offset, int reg, uint ireg, outputStream* st);
+
#undef __
-#define __ cgen->assembler()->
+#define __ _masm->
+
+class ZSaveLiveRegisters {
+private:
+ struct XMMRegisterData {
+ XMMRegister _reg;
+ int _size;
+
+ // Used by GrowableArray::find()
+ bool operator == (const XMMRegisterData& other) {
+ return _reg == other._reg;
+ }
+ };
+
+ MacroAssembler* const _masm;
+ GrowableArray<Register> _gp_registers;
+ GrowableArray<XMMRegisterData> _xmm_registers;
+ int _spill_size;
+ int _spill_offset;
-// Generates a register specific stub for calling
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-//
-// The raddr register serves as both input and output for this stub. When the stub is
-// called the raddr register contains the object field address (oop*) where the bad oop
-// was loaded from, which caused the slow path to be taken. On return from the stub the
-// raddr register contains the good/healed oop returned from
-// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
-// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
-static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
- // Don't generate stub for invalid registers
- if (raddr == rsp || raddr == r15) {
- return NULL;
+ static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
+ if (left->_size == right->_size) {
+ return 0;
+ }
+
+ return (left->_size < right->_size) ? -1 : 1;
+ }
+
+ static int xmm_slot_size(OptoReg::Name opto_reg) {
+ // The low order 4 bits denote what size of the XMM register is live
+ return (opto_reg & 15) << 3;
+ }
+
+ static uint xmm_ideal_reg_for_size(int reg_size) {
+ switch (reg_size) {
+ case 8:
+ return Op_VecD;
+ case 16:
+ return Op_VecX;
+ case 32:
+ return Op_VecY;
+ case 64:
+ return Op_VecZ;
+ default:
+ fatal("Invalid register size %d", reg_size);
+ return 0;
+ }
+ }
+
+ bool xmm_needs_vzeroupper() const {
+ return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
+ }
+
+ void xmm_register_save(const XMMRegisterData& reg_data) {
+ const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+ const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+ _spill_offset -= reg_data._size;
+ vec_spill_helper(__ code(), false /* do_size */, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ }
+
+ void xmm_register_restore(const XMMRegisterData& reg_data) {
+ const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+ const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+ vec_spill_helper(__ code(), false /* do_size */, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ _spill_offset += reg_data._size;
+ }
+
+ void gp_register_save(Register reg) {
+ _spill_offset -= 8;
+ __ movq(Address(rsp, _spill_offset), reg);
+ }
+
+ void gp_register_restore(Register reg) {
+ __ movq(reg, Address(rsp, _spill_offset));
+ _spill_offset += 8;
}
- // Create stub name
- char name[64];
- const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
- os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+ void initialize(ZLoadBarrierStubC2* stub) {
+ // Create mask of caller saved registers that need to
+ // be saved/restored if live
+ RegMask caller_saved;
+ caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
+ caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
- __ align(CodeEntryAlignment);
- StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
- address start = __ pc();
+ // Create mask of live registers
+ RegMask live = stub->live();
+ if (stub->tmp() != noreg) {
+ live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
+ }
+
+ int gp_spill_size = 0;
+ int xmm_spill_size = 0;
+
+ // Record registers that need to be saved/restored
+ while (live.is_NotEmpty()) {
+ const OptoReg::Name opto_reg = live.find_first_elem();
+ const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+
+ live.Remove(opto_reg);
- // Save live registers
- if (raddr != rax) {
- __ push(rax);
- }
- if (raddr != rcx) {
- __ push(rcx);
- }
- if (raddr != rdx) {
- __ push(rdx);
- }
- if (raddr != rsi) {
- __ push(rsi);
- }
- if (raddr != rdi) {
- __ push(rdi);
- }
- if (raddr != r8) {
- __ push(r8);
- }
- if (raddr != r9) {
- __ push(r9);
- }
- if (raddr != r10) {
- __ push(r10);
- }
- if (raddr != r11) {
- __ push(r11);
+ if (vm_reg->is_Register()) {
+ if (caller_saved.Member(opto_reg)) {
+ _gp_registers.append(vm_reg->as_Register());
+ gp_spill_size += 8;
+ }
+ } else if (vm_reg->is_XMMRegister()) {
+ // We encode in the low order 4 bits of the opto_reg how large a part of the register is live
+ const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
+ const int reg_size = xmm_slot_size(opto_reg);
+ const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
+ const int reg_index = _xmm_registers.find(reg_data);
+ if (reg_index == -1) {
+ // Not previously appended
+ _xmm_registers.append(reg_data);
+ xmm_spill_size += reg_size;
+ } else {
+ // Previously appended, update size
+ const int reg_size_prev = _xmm_registers.at(reg_index)._size;
+ if (reg_size > reg_size_prev) {
+ _xmm_registers.at_put(reg_index, reg_data);
+ xmm_spill_size += reg_size - reg_size_prev;
+ }
+ }
+ } else {
+ fatal("Unexpected register type");
+ }
+ }
+
+ // Sort by size, largest first
+ _xmm_registers.sort(xmm_compare_register_size);
+
+ // Stack pointer must be 16 bytes aligned for the call
+ _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
}
- // Setup arguments
- if (raddr != c_rarg1) {
- __ movq(c_rarg1, raddr);
- }
- __ movq(c_rarg0, Address(raddr, 0));
+public:
+ ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+ _masm(masm),
+ _gp_registers(),
+ _xmm_registers(),
+ _spill_size(0),
+ _spill_offset(0) {
- // Call barrier function
- __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+ //
+ // Stack layout after registers have been spilled:
+ //
+ // | ... | original rsp, 16 bytes aligned
+ // ------------------
+ // | zmm0 high |
+ // | ... |
+ // | zmm0 low | 16 bytes aligned
+ // | ... |
+ // | ymm1 high |
+ // | ... |
+ // | ymm1 low | 16 bytes aligned
+ // | ... |
+ // | xmmN high |
+ // | ... |
+ // | xmmN low | 8 bytes aligned
+ // | reg0 | 8 bytes aligned
+ // | reg1 |
+ // | ... |
+ // | regN | new rsp, if 16 bytes aligned
+ // | <padding> | else new rsp, 16 bytes aligned
+ // ------------------
+ //
- // Move result returned in rax to raddr, if needed
- if (raddr != rax) {
- __ movq(raddr, rax);
+ // Figure out what registers to save/restore
+ initialize(stub);
+
+ // Allocate stack space
+ if (_spill_size > 0) {
+ __ subptr(rsp, _spill_size);
+ }
+
+ // Save XMM/YMM/ZMM registers
+ for (int i = 0; i < _xmm_registers.length(); i++) {
+ xmm_register_save(_xmm_registers.at(i));
+ }
+
+ if (xmm_needs_vzeroupper()) {
+ __ vzeroupper();
+ }
+
+ // Save general purpose registers
+ for (int i = 0; i < _gp_registers.length(); i++) {
+ gp_register_save(_gp_registers.at(i));
+ }
}
- // Restore saved registers
- if (raddr != r11) {
- __ pop(r11);
- }
- if (raddr != r10) {
- __ pop(r10);
- }
- if (raddr != r9) {
- __ pop(r9);
- }
- if (raddr != r8) {
- __ pop(r8);
+ ~ZSaveLiveRegisters() {
+ // Restore general purpose registers
+ for (int i = _gp_registers.length() - 1; i >= 0; i--) {
+ gp_register_restore(_gp_registers.at(i));
+ }
+
+ __ vzeroupper();
+
+ // Restore XMM/YMM/ZMM registers
+ for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
+ xmm_register_restore(_xmm_registers.at(i));
+ }
+
+ // Free stack space
+ if (_spill_size > 0) {
+ __ addptr(rsp, _spill_size);
+ }
}
- if (raddr != rdi) {
- __ pop(rdi);
- }
- if (raddr != rsi) {
- __ pop(rsi);
- }
- if (raddr != rdx) {
- __ pop(rdx);
- }
- if (raddr != rcx) {
- __ pop(rcx);
- }
- if (raddr != rax) {
- __ pop(rax);
+};
+
+class ZSetupArguments {
+private:
+ MacroAssembler* const _masm;
+ const Register _ref;
+ const Address _ref_addr;
+
+public:
+ ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+ _masm(masm),
+ _ref(stub->ref()),
+ _ref_addr(stub->ref_addr()) {
+
+ // Setup arguments
+ if (_ref_addr.base() == noreg) {
+ // No self healing
+ if (_ref != c_rarg0) {
+ __ movq(c_rarg0, _ref);
+ }
+ __ xorq(c_rarg1, c_rarg1);
+ } else {
+ // Self healing
+ if (_ref == c_rarg0) {
+ __ lea(c_rarg1, _ref_addr);
+ } else if (_ref != c_rarg1) {
+ __ lea(c_rarg1, _ref_addr);
+ __ movq(c_rarg0, _ref);
+ } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
+ __ movq(c_rarg0, _ref);
+ __ lea(c_rarg1, _ref_addr);
+ } else {
+ __ xchgq(c_rarg0, c_rarg1);
+ if (_ref_addr.base() == c_rarg0) {
+ __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
+ } else if (_ref_addr.index() == c_rarg0) {
+ __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+ }
}
- __ ret(0);
+ ~ZSetupArguments() {
+ // Transfer result
+ if (_ref != rax) {
+ __ movq(_ref, rax);
+ }
+ }
+};
+
+#undef __
+#define __ masm->
- return start;
+void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
+ BLOCK_COMMENT("ZLoadBarrierStubC2");
+
+ // Stub entry
+ __ bind(*stub->entry());
+
+ {
+ ZSaveLiveRegisters save_live_registers(masm, stub);
+ ZSetupArguments setup_arguments(masm, stub);
+ __ call(RuntimeAddress(stub->slow_path()));
+ }
+
+ // Stub exit
+ __ jmp(*stub->continuation());
}
#undef __
-static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) {
- const int nregs = RegisterImpl::number_of_registers;
- const int code_size = nregs * 128; // Rough estimate of code size
-
- ResourceMark rm;
-
- CodeBuffer buf(BufferBlob::create(label, code_size));
- StubCodeGenerator cgen(&buf);
-
- for (int i = 0; i < nregs; i++) {
- const Register reg = as_Register(i);
- stub[i] = generate_load_barrier_stub(&cgen, reg, decorators);
- }
-}
-
-void ZBarrierSetAssembler::barrier_stubs_init() {
- barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub);
- barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub);
-}
-
-address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
- return _load_barrier_slow_stub[reg->encoding()];
-}
-
-address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) {
- return _load_barrier_weak_slow_stub[reg->encoding()];
-}
+#endif // COMPILER2
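
A small standalone check (plain C++, not HotSpot code) of the live-width encoding used by refine_register() and xmm_slot_size() above: the low 4 bits of an XMM opto_reg record how much of the register is live, in units of 8 bytes.

#include <cassert>

static int xmm_slot_size(int opto_reg) { return (opto_reg & 15) << 3; }

int main() {
  assert(xmm_slot_size(0x21) ==  8);  // |= 1: scalar double (default case)
  assert(xmm_slot_size(0x22) == 16);  // |= 2: Op_VecX (full XMM)
  assert(xmm_slot_size(0x24) == 32);  // |= 4: Op_VecY (YMM)
  assert(xmm_slot_size(0x28) == 64);  // |= 8: Op_VecZ (ZMM)
  return 0;
}
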
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,6 +24,14 @@
#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+#include "code/vmreg.hpp"
+#include "oops/accessDecorators.hpp"
+#ifdef COMPILER2
+#include "opto/optoreg.hpp"
+#endif // COMPILER2
+
+class MacroAssembler;
+
#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
@@ -32,14 +40,13 @@
class ZLoadBarrierStubC1;
#endif // COMPILER1
+#ifdef COMPILER2
+class Node;
+class ZLoadBarrierStubC2;
+#endif // COMPILER2
+
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
-private:
- address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
- address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
-
public:
- ZBarrierSetAssembler();
-
virtual void load_at(MacroAssembler* masm,
DecoratorSet decorators,
BasicType type,
@@ -82,10 +89,13 @@
DecoratorSet decorators) const;
#endif // COMPILER1
- virtual void barrier_stubs_init();
+#ifdef COMPILER2
+ OptoReg::Name refine_register(const Node* node,
+ OptoReg::Name opto_reg);
- address load_barrier_slow_stub(Register reg);
- address load_barrier_weak_slow_stub(Register reg);
+ void generate_c2_load_barrier_stub(MacroAssembler* masm,
+ ZLoadBarrierStubC2* stub) const;
+#endif // COMPILER2
};
#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -40,7 +40,7 @@
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
@@ -75,7 +75,7 @@
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
@@ -110,7 +110,7 @@
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
-// | (Reserved, but unused) |
+// . .
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -36,7 +36,6 @@
// ------------------------------------------------------------------
//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
-const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Fri Oct 11 12:08:01 2019 +0530
@@ -24,190 +24,144 @@
source_hpp %{
#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
%}
source %{
-#include "gc/z/zBarrierSetAssembler.hpp"
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+ __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::notZero, *stub->entry());
+ __ bind(*stub->continuation());
+}
-static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
- assert(dst != rsp, "Invalid register");
- assert(dst != r15, "Invalid register");
-
- const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
- : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
- __ lea(dst, src);
- __ call(RuntimeAddress(stub));
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+ __ jmp(*stub->entry());
+ __ bind(*stub->continuation());
}
%}
-// For XMM and YMM enabled processors
-instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
- rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
- rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
- rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
+// Load Pointer
+instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+ predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+ match(Set dst (LoadP mem));
+ effect(KILL cr, TEMP dst);
- effect(KILL cr,
- KILL x0, KILL x1, KILL x2, KILL x3,
- KILL x4, KILL x5, KILL x6, KILL x7,
- KILL x8, KILL x9, KILL x10, KILL x11,
- KILL x12, KILL x13, KILL x14, KILL x15);
+ ins_cost(125);
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
+ format %{ "movq $dst, $mem" %}
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
+ __ movptr($dst$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */);
+ }
%}
- ins_pipe(pipe_slow);
+
+ ins_pipe(ialu_reg_mem);
%}
-// For ZMM enabled processors
-instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
- rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
- rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
- rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
- rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
- rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
- rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
- rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+// Load Weak Pointer
+instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+ predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak);
+ match(Set dst (LoadP mem));
+ effect(KILL cr, TEMP dst);
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
+ ins_cost(125);
- effect(KILL cr,
- KILL x0, KILL x1, KILL x2, KILL x3,
- KILL x4, KILL x5, KILL x6, KILL x7,
- KILL x8, KILL x9, KILL x10, KILL x11,
- KILL x12, KILL x13, KILL x14, KILL x15,
- KILL x16, KILL x17, KILL x18, KILL x19,
- KILL x20, KILL x21, KILL x22, KILL x23,
- KILL x24, KILL x25, KILL x26, KILL x27,
- KILL x28, KILL x29, KILL x30, KILL x31);
-
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
+ format %{ "movq $dst, $mem" %}
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
+ __ movptr($dst$$Register, $mem$$Address);
+ z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */);
%}
- ins_pipe(pipe_slow);
-%}
-
-// For XMM and YMM enabled processors
-instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
- rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
- rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
- rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
- effect(KILL cr,
- KILL x0, KILL x1, KILL x2, KILL x3,
- KILL x4, KILL x5, KILL x6, KILL x7,
- KILL x8, KILL x9, KILL x10, KILL x11,
- KILL x12, KILL x13, KILL x14, KILL x15);
-
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
-
- ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
- %}
- ins_pipe(pipe_slow);
+ ins_pipe(ialu_reg_mem);
%}
-// For ZMM enabled processors
-instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
- rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
- rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
- rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
- rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
- rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
- rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
- rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
+ match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, TEMP tmp);
- match(Set dst (LoadBarrierSlowReg src dst));
- predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
-
- effect(KILL cr,
- KILL x0, KILL x1, KILL x2, KILL x3,
- KILL x4, KILL x5, KILL x6, KILL x7,
- KILL x8, KILL x9, KILL x10, KILL x11,
- KILL x12, KILL x13, KILL x14, KILL x15,
- KILL x16, KILL x17, KILL x18, KILL x19,
- KILL x20, KILL x21, KILL x22, KILL x23,
- KILL x24, KILL x25, KILL x26, KILL x27,
- KILL x28, KILL x29, KILL x30, KILL x31);
-
- format %{ "lea $dst, $src\n\t"
- "call #ZLoadBarrierSlowPath" %}
+ format %{ "lock\n\t"
+ "cmpxchgq $newval, $mem" %}
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
+ if (barrier_data() != ZLoadBarrierElided) {
+ __ movptr($tmp$$Register, $oldval$$Register);
+ }
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::zero, good);
+ z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+ __ movptr($oldval$$Register, $tmp$$Register);
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ __ bind(good);
+ }
%}
- ins_pipe(pipe_slow);
+
+ ins_pipe(pipe_cmpxchg);
%}
-// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
-// but doesn't affect output.
+instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, KILL oldval, TEMP tmp);
+
+ format %{ "lock\n\t"
+ "cmpxchgq $newval, $mem\n\t"
+ "sete $res\n\t"
+ "movzbl $res, $res" %}
-instruct z_compareAndExchangeP(
- memory mem_ptr,
- rax_RegP oldval, rRegP newval, rRegP keepalive,
- rFlagsReg cr) %{
- predicate(VM_Version::supports_cx8());
- match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
- effect(KILL cr);
+ ins_encode %{
+ if (barrier_data() != ZLoadBarrierElided) {
+ __ movptr($tmp$$Register, $oldval$$Register);
+ }
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::zero, good);
+ z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+ __ movptr($oldval$$Register, $tmp$$Register);
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ __ bind(good);
+ __ cmpptr($tmp$$Register, $oldval$$Register);
+ }
+ __ setb(Assembler::equal, $res$$Register);
+ __ movzbl($res$$Register, $res$$Register);
+ %}
- format %{ "cmpxchgq $mem_ptr,$newval\t# "
- "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
- opcode(0x0F, 0xB1);
- ins_encode(lock_prefix,
- REX_reg_mem_wide(newval, mem_ptr),
- OpcP, OpcS,
- reg_mem(newval, mem_ptr) // lock cmpxchg
- );
- ins_pipe( pipe_cmpxchg );
+ ins_pipe(pipe_cmpxchg);
%}
-instruct z_compareAndSwapP(rRegI res,
- memory mem_ptr,
- rax_RegP oldval, rRegP newval, rRegP keepalive,
- rFlagsReg cr) %{
- predicate(VM_Version::supports_cx8());
- match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
- match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
- effect(KILL cr, KILL oldval);
+instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{
+ match(Set newval (GetAndSetP mem newval));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr);
+
+ format %{ "xchgq $newval, $mem" %}
- format %{ "cmpxchgq $mem_ptr,$newval\t# "
- "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
- "sete $res\n\t"
- "movzbl $res, $res" %}
- opcode(0x0F, 0xB1);
- ins_encode(lock_prefix,
- REX_reg_mem_wide(newval, mem_ptr),
- OpcP, OpcS,
- reg_mem(newval, mem_ptr),
- REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
- REX_reg_breg(res, res), // movzbl
- Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
- ins_pipe( pipe_cmpxchg );
+ ins_encode %{
+ __ xchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(pipe_cmpxchg);
%}
-
-instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
- match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
- format %{ "XCHGQ $newval,[$mem]" %}
- ins_encode %{
- __ xchgq($newval$$Register, $mem$$Address);
- %}
- ins_pipe( pipe_cmpxchg );
-%}
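
The new zLoadP/zLoadWeakP rules above emit an inline fast path and only branch to the out-of-line ZLoadBarrierStubC2 when the loaded reference has a bad color. A standalone C++ sketch of that fast path; slow_path_heal() is a hypothetical stand-in for the stub's register save, argument setup, and runtime call.

#include <cstdint>

// Stand-in for the out-of-line ZLoadBarrierStubC2 slow path (hypothetical).
static void* slow_path_heal(void** ref_addr, void* ref) {
  (void)ref_addr;   // the real stub calls ZBarrierSetRuntime and heals the field
  return ref;
}

static void* zgc_load_ref(void** ref_addr, uintptr_t address_bad_mask) {
  void* ref = *ref_addr;                                      // movq dst, mem
  if (reinterpret_cast<uintptr_t>(ref) & address_bad_mask) {  // testptr + jcc(notZero, stub)
    ref = slow_path_heal(ref_addr, ref);                      // stub entry
  }
  return ref;                                                 // stub->continuation()
}
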
--- a/src/hotspot/cpu/x86/globals_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/globals_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -211,5 +211,15 @@
"Use BMI2 instructions") \
\
diagnostic(bool, UseLibmIntrinsic, true, \
- "Use Libm Intrinsics")
+ "Use Libm Intrinsics") \
+ \
+ /* Minimum array size in bytes to use AVX512 intrinsics */ \
+ /* for copy, inflate and fill which don't bail out early based on any */ \
+ /* condition. When this value is set to zero compare operations like */ \
+ /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
+ diagnostic(int, AVX3Threshold, 4096, \
+ "Minimum array size in bytes to use AVX512 intrinsics" \
+ "for copy, inflate and fill. When this value is set as zero" \
+ "compare operations can also use AVX512 intrinsics.") \
+ range(0, max_jint)
#endif // CPU_X86_GLOBALS_X86_HPP
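
A standalone sketch (not HotSpot code) of the gating pattern the macroAssembler and stubGenerator hunks below apply: AVX-512-only paths are used either when AVX3Threshold is 0 (compare-style intrinsics) or when the work size reaches the threshold (copy/inflate/fill).

#include <cstddef>

static bool use_avx512_path(size_t byte_count, int avx3_threshold,
                            bool supports_avx512vlbw) {
  if (!supports_avx512vlbw) {
    return false;                 // no 512-bit path available at all
  }
  return avx3_threshold == 0 || byte_count >= (size_t)avx3_threshold;
}

Because AVX3Threshold is a diagnostic flag, overriding the 4096-byte default in product builds requires -XX:+UnlockDiagnosticVMOptions, e.g. -XX:AVX3Threshold=0 to keep the previous always-AVX-512 behaviour.
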
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -824,11 +824,13 @@
}
void MacroAssembler::stop(const char* msg) {
- address rip = pc();
- pusha(); // get regs on stack
+ if (ShowMessageBoxOnError) {
+ address rip = pc();
+ pusha(); // get regs on stack
+ lea(c_rarg1, InternalAddress(rip));
+ movq(c_rarg2, rsp); // pass pointer to regs array
+ }
lea(c_rarg0, ExternalAddress((address) msg));
- lea(c_rarg1, InternalAddress(rip));
- movq(c_rarg2, rsp); // pass pointer to regs array
andq(rsp, -16); // align stack as required by ABI
call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
hlt();
@@ -3661,6 +3663,15 @@
}
}
+void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg) {
+ if (reachable(src)) {
+ Assembler::roundsd(dst, as_Address(src), rmode);
+ } else {
+ lea(scratch_reg, src);
+ Assembler::roundsd(dst, Address(scratch_reg, 0), rmode);
+ }
+}
+
void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::subss(dst, as_Address(src));
@@ -6584,7 +6595,7 @@
bind(COMPARE_WIDE_VECTORS_LOOP);
#ifdef _LP64
- if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
+ if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
cmpl(cnt2, stride2x2);
jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
testl(cnt2, stride2x2-1); // cnt2 holds the vector count
@@ -6844,7 +6855,7 @@
testl(len, len);
jcc(Assembler::zero, FALSE_LABEL);
- if ((UseAVX > 2) && // AVX512
+ if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
@@ -6917,7 +6928,7 @@
} else {
movl(result, len); // copy
- if (UseAVX == 2 && UseSSE >= 2) {
+ if (UseAVX >= 2 && UseSSE >= 2) {
// With AVX2, use 32-byte vector compare
Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
@@ -7090,14 +7101,12 @@
lea(ary2, Address(ary2, limit, Address::times_1));
negptr(limit);
- bind(COMPARE_WIDE_VECTORS);
-
#ifdef _LP64
- if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
+ if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;
cmpl(limit, -64);
- jccb(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
+ jcc(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);
bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
@@ -7130,7 +7139,7 @@
}//if (VM_Version::supports_avx512vlbw())
#endif //_LP64
-
+ bind(COMPARE_WIDE_VECTORS);
vmovdqu(vec1, Address(ary1, limit, Address::times_1));
vmovdqu(vec2, Address(ary2, limit, Address::times_1));
vpxor(vec1, vec2);
@@ -7356,32 +7365,33 @@
assert( UseSSE >= 2, "supported cpu only" );
Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
movdl(xtmp, value);
- if (UseAVX > 2 && UseUnalignedLoadStores) {
+ if (UseAVX >= 2 && UseUnalignedLoadStores) {
+ Label L_check_fill_32_bytes;
+ if (UseAVX > 2) {
+ // Fill 64-byte chunks
+ Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
+
+ // If number of bytes to fill < AVX3Threshold, perform fill using AVX2
+ cmpl(count, AVX3Threshold);
+ jccb(Assembler::below, L_check_fill_64_bytes_avx2);
+
+ vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
+
+ subl(count, 16 << shift);
+ jccb(Assembler::less, L_check_fill_32_bytes);
+ align(16);
+
+ BIND(L_fill_64_bytes_loop_avx3);
+ evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
+ addptr(to, 64);
+ subl(count, 16 << shift);
+ jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
+ jmpb(L_check_fill_32_bytes);
+
+ BIND(L_check_fill_64_bytes_avx2);
+ }
// Fill 64-byte chunks
- Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
- vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
-
- subl(count, 16 << shift);
- jcc(Assembler::less, L_check_fill_32_bytes);
- align(16);
-
- BIND(L_fill_64_bytes_loop);
- evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
- addptr(to, 64);
- subl(count, 16 << shift);
- jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
-
- BIND(L_check_fill_32_bytes);
- addl(count, 8 << shift);
- jccb(Assembler::less, L_check_fill_8_bytes);
- vmovdqu(Address(to, 0), xtmp);
- addptr(to, 32);
- subl(count, 8 << shift);
-
- BIND(L_check_fill_8_bytes);
- } else if (UseAVX == 2 && UseUnalignedLoadStores) {
- // Fill 64-byte chunks
- Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
+ Label L_fill_64_bytes_loop;
vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);
subl(count, 16 << shift);
@@ -8095,12 +8105,13 @@
shlq(length);
xorq(result, result);
- if ((UseAVX > 2) &&
+ if ((AVX3Threshold == 0) && (UseAVX > 2) &&
VM_Version::supports_avx512vlbw()) {
Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
cmpq(length, 64);
jcc(Assembler::less, VECTOR32_TAIL);
+
movq(tmp1, length);
andq(tmp1, 0x3F); // tail count
andq(length, ~(0x3F)); //vector count
@@ -9557,7 +9568,7 @@
// save length for return
push(len);
- if ((UseAVX > 2) && // AVX512
+ if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
@@ -9749,7 +9760,7 @@
// }
void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
XMMRegister tmp1, Register tmp2) {
- Label copy_chars_loop, done, below_threshold;
+ Label copy_chars_loop, done, below_threshold, avx3_threshold;
// rsi: src
// rdi: dst
// rdx: len
@@ -9759,7 +9770,7 @@
// rdi holds start addr of destination char[]
// rdx holds length
assert_different_registers(src, dst, len, tmp2);
-
+ movl(tmp2, len);
if ((UseAVX > 2) && // AVX512
VM_Version::supports_avx512vlbw() &&
VM_Version::supports_bmi2()) {
@@ -9771,9 +9782,11 @@
testl(len, -16);
jcc(Assembler::zero, below_threshold);
+ testl(len, -1 * AVX3Threshold);
+ jcc(Assembler::zero, avx3_threshold);
+
// In order to use only one arithmetic operation for the main loop we use
// this pre-calculation
- movl(tmp2, len);
andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
andl(len, -32); // vector count
jccb(Assembler::zero, copy_tail);
@@ -9804,12 +9817,11 @@
evmovdquw(Address(dst, 0), k2, tmp1, Assembler::AVX_512bit);
jmp(done);
+ bind(avx3_threshold);
}
if (UseSSE42Intrinsics) {
Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
- movl(tmp2, len);
-
if (UseAVX > 1) {
andl(tmp2, (16 - 1));
andl(len, -16);
@@ -9834,13 +9846,7 @@
bind(below_threshold);
bind(copy_new_tail);
- if ((UseAVX > 2) &&
- VM_Version::supports_avx512vlbw() &&
- VM_Version::supports_bmi2()) {
- movl(tmp2, len);
- } else {
- movl(len, tmp2);
- }
+ movl(len, tmp2);
andl(tmp2, 0x00000007);
andl(len, 0xFFFFFFF8);
jccb(Assembler::zero, copy_tail);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1180,6 +1180,10 @@
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src);
+ void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
+ void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
+ void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
+
void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, AddressLiteral src);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1288,30 +1288,58 @@
if (UseUnalignedLoadStores) {
Label L_end;
// Copy 64-bytes per iteration
- __ BIND(L_loop);
if (UseAVX > 2) {
+ Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;
+
+ __ BIND(L_copy_bytes);
+ __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
+ __ jccb(Assembler::less, L_above_threshold);
+ __ jmpb(L_below_threshold);
+
+ __ bind(L_loop_avx512);
__ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
__ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
- } else if (UseAVX == 2) {
+ __ bind(L_above_threshold);
+ __ addptr(qword_count, 8);
+ __ jcc(Assembler::lessEqual, L_loop_avx512);
+ __ jmpb(L_32_byte_head);
+
+ __ bind(L_loop_avx2);
__ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
__ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
__ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
+ __ bind(L_below_threshold);
+ __ addptr(qword_count, 8);
+ __ jcc(Assembler::lessEqual, L_loop_avx2);
+
+ __ bind(L_32_byte_head);
+ __ subptr(qword_count, 4); // sub(8) and add(4)
+ __ jccb(Assembler::greater, L_end);
} else {
- __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
- __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
- __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
- __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
- __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
- __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
- __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
- __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
+ __ BIND(L_loop);
+ if (UseAVX == 2) {
+ __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
+ __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
+ __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
+ __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
+ } else {
+ __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
+ __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
+ __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
+ __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
+ __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
+ __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
+ __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
+ __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
+ }
+
+ __ BIND(L_copy_bytes);
+ __ addptr(qword_count, 8);
+ __ jcc(Assembler::lessEqual, L_loop);
+ __ subptr(qword_count, 4); // sub(8) and add(4)
+ __ jccb(Assembler::greater, L_end);
}
- __ BIND(L_copy_bytes);
- __ addptr(qword_count, 8);
- __ jcc(Assembler::lessEqual, L_loop);
- __ subptr(qword_count, 4); // sub(8) and add(4)
- __ jccb(Assembler::greater, L_end);
// Copy trailing 32 bytes
if (UseAVX >= 2) {
__ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@@ -1368,31 +1396,59 @@
if (UseUnalignedLoadStores) {
Label L_end;
// Copy 64-bytes per iteration
- __ BIND(L_loop);
if (UseAVX > 2) {
+ Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;
+
+ __ BIND(L_copy_bytes);
+ __ cmpptr(qword_count, (AVX3Threshold / 8));
+ __ jccb(Assembler::greater, L_above_threshold);
+ __ jmpb(L_below_threshold);
+
+ __ BIND(L_loop_avx512);
__ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
__ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
- } else if (UseAVX == 2) {
+ __ bind(L_above_threshold);
+ __ subptr(qword_count, 8);
+ __ jcc(Assembler::greaterEqual, L_loop_avx512);
+ __ jmpb(L_32_byte_head);
+
+ __ bind(L_loop_avx2);
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
- __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
- __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
+ __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
+ __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
+ __ bind(L_below_threshold);
+ __ subptr(qword_count, 8);
+ __ jcc(Assembler::greaterEqual, L_loop_avx2);
+
+ __ bind(L_32_byte_head);
+ __ addptr(qword_count, 4); // add(8) and sub(4)
+ __ jccb(Assembler::less, L_end);
} else {
- __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
- __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
- __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
- __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
- __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
- __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
- __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
- __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
+ __ BIND(L_loop);
+ if (UseAVX == 2) {
+ __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
+ __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
+ __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
+ __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
+ } else {
+ __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
+ __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
+ __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
+ __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
+ __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
+ __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
+ __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
+ __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
+ }
+
+ __ BIND(L_copy_bytes);
+ __ subptr(qword_count, 8);
+ __ jcc(Assembler::greaterEqual, L_loop);
+
+ __ addptr(qword_count, 4); // add(8) and sub(4)
+ __ jccb(Assembler::less, L_end);
}
- __ BIND(L_copy_bytes);
- __ subptr(qword_count, 8);
- __ jcc(Assembler::greaterEqual, L_loop);
-
- __ addptr(qword_count, 4); // add(8) and sub(4)
- __ jccb(Assembler::less, L_end);
// Copy trailing 32 bytes
if (UseAVX >= 2) {
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
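
Both copy stubs above now pick the loop width at run time: copies below AVX3Threshold bytes stay on the 32-byte ymm loop, larger ones take the 64-byte zmm loop, and the existing fix-up code still handles the tail. A scalar model of that control flow, for illustration only (buffer names and sizes are made up):

#include <cstddef>
#include <cstdint>
#include <cstring>

static void copy_model(const uint8_t* from, uint8_t* to, size_t len,
                       size_t avx3_threshold) {
  const size_t chunk = (len >= avx3_threshold) ? 64 : 32;   // zmm vs. ymm width
  size_t i = 0;
  for (; i + chunk <= len; i += chunk) {
    std::memcpy(to + i, from + i, chunk);                   // one vector load/store pair
  }
  std::memcpy(to + i, from + i, len - i);                   // tail fix-up
}

int main() {
  uint8_t src[300], dst[300];
  for (int i = 0; i < 300; i++) src[i] = (uint8_t)i;
  copy_model(src, dst, sizeof(src), 4096);                  // below threshold: 32-byte chunks
  return std::memcmp(src, dst, sizeof(src)) != 0;
}

Keeping small copies off the 512-bit path is presumably about the frequency penalty of 512-bit instructions on Skylake parts, which the vm_version changes below also address by defaulting those CPUs to UseAVX=2.
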
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -381,6 +381,10 @@
__ cmpl(rax, 0xE0);
__ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+ __ movl(rax, Address(rsi, 0));
+ __ cmpl(rax, 0x50654); // If it is Skylake
+ __ jcc(Assembler::equal, legacy_setup);
// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
// EVEX setup: run in lowest evex mode
@@ -465,6 +469,11 @@
__ cmpl(rax, 0xE0);
__ jcc(Assembler::notEqual, legacy_save_restore);
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+ __ movl(rax, Address(rsi, 0));
+ __ cmpl(rax, 0x50654); // If it is Skylake
+ __ jcc(Assembler::equal, legacy_save_restore);
+
// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
// EVEX check: run in lowest evex mode
@@ -660,6 +669,9 @@
}
if (FLAG_IS_DEFAULT(UseAVX)) {
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
+ if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
+ FLAG_SET_DEFAULT(UseAVX, 2); // Set UseAVX=2 for Skylake
+ }
} else if (UseAVX > use_avx_limit) {
warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
@@ -1059,6 +1071,13 @@
}
#endif // COMPILER2 && ASSERT
+ if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
+ if (!is_power_of_2(AVX3Threshold)) {
+ warning("AVX3Threshold must be a power of 2");
+ FLAG_SET_DEFAULT(AVX3Threshold, 4096);
+ }
+ }
+
#ifdef _LP64
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
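
The new AVX3Threshold guard rejects non-power-of-two values and falls back to the 4096-byte default. A minimal sketch of that validation; is_power_of_2() is assumed to be the usual x & (x - 1) test for positive x:

#include <cstdio>

static long validate_avx3_threshold(long value) {
  bool pow2 = value > 0 && (value & (value - 1)) == 0;
  if (!pow2) {
    std::fprintf(stderr, "AVX3Threshold must be a power of 2\n");
    return 4096;                              // reset to the default
  }
  return value;
}

int main() {
  std::printf("%ld %ld\n", validate_avx3_threshold(8192), validate_avx3_threshold(6000));
  return 0;
}
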
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -366,7 +366,7 @@
CPU_MODEL_HASWELL_E3 = 0x3c,
CPU_MODEL_HASWELL_E7 = 0x3f,
CPU_MODEL_BROADWELL = 0x3d,
- CPU_MODEL_SKYLAKE = CPU_MODEL_HASWELL_E3
+ CPU_MODEL_SKYLAKE = 0x55
};
// cpuid information block. All info derived from executing cpuid with
--- a/src/hotspot/cpu/x86/x86.ad Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/x86.ad Fri Oct 11 12:08:01 2019 +0530
@@ -1097,138 +1097,6 @@
reg_class_dynamic vectorz_reg(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} );
reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} );
-reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
-reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
-reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
-
-reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
-reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
-reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
-
-reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
-reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
-reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
-
-reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
-reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
-reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
-
-reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
-reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
-reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
-
-reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
-reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
-reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
-
-reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
-reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
-reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
-
-reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
-reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
-reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
-
-#ifdef _LP64
-
-reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
-reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
-reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
-
-reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
-reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
-reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
-
-reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
-reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
-reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
-
-reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
-reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
-reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
-
-reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
-reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
-reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
-
-reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
-reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
-reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
-
-reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
-reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
-reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
-
-reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
-reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
-reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
-
-reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
-reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
-reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
-
-reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
-reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
-reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
-
-reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
-reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
-reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
-
-reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
-reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
-reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
-
-reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
-reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
-reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
-
-reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
-reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
-reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
-
-reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
-reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
-reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
-
-reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
-reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
-reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
-
-reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
-reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
-reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
-
-reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
-reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
-reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
-
-reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
-reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
-reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
-
-reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
-reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
-reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
-
-reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
-reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
-reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
-
-reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
-reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
-reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
-
-reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
-reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
-reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
-
-reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
-reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
-reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
-
-#endif
-
%}
@@ -1485,6 +1353,10 @@
ret_value = false;
}
break;
+ case Op_RoundDoubleMode:
+ if (UseSSE < 4)
+ ret_value = false;
+ break;
}
return ret_value; // Per default match rules are supported.
@@ -1536,6 +1408,10 @@
if (vlen != 4)
ret_value = false;
break;
+ case Op_RoundDoubleModeV:
+ if (VM_Version::supports_avx() == false)
+ ret_value = false;
+ break;
}
}
@@ -1792,8 +1668,8 @@
return (UseAVX > 2) ? 6 : 4;
}
-static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
- int stack_offset, int reg, uint ireg, outputStream* st) {
+int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+ int stack_offset, int reg, uint ireg, outputStream* st) {
// In 64-bit VM size calculation is very complex. Emitting instructions
// into scratch buffer is used to get size in 64-bit VM.
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
@@ -2854,6 +2730,110 @@
ins_pipe(pipe_slow);
%}
+
+#ifdef _LP64
+instruct roundD_reg(legRegD dst, legRegD src, immU8 rmode) %{
+ predicate(UseSSE>=4);
+ match(Set dst (RoundDoubleMode src rmode));
+ format %{ "roundsd $dst, $src" %}
+ ins_cost(150);
+ ins_encode %{
+ __ roundsd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct roundD_mem(legRegD dst, memory src, immU8 rmode) %{
+ predicate(UseSSE>=4);
+ match(Set dst (RoundDoubleMode (LoadD src) rmode));
+ format %{ "roundsd $dst, $src" %}
+ ins_cost(150);
+ ins_encode %{
+ __ roundsd($dst$$XMMRegister, $src$$Address, $rmode$$constant);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct roundD_imm(legRegD dst, immD con, immU8 rmode, rRegI scratch_reg) %{
+ predicate(UseSSE>=4);
+ match(Set dst (RoundDoubleMode con rmode));
+ effect(TEMP scratch_reg);
+ format %{ "roundsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
+ ins_cost(150);
+ ins_encode %{
+ __ roundsd($dst$$XMMRegister, $constantaddress($con), $rmode$$constant, $scratch_reg$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct vround2D_reg(legVecX dst, legVecX src, immU8 rmode) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+ match(Set dst (RoundDoubleModeV src rmode));
+ format %{ "vroundpd $dst, $src, $rmode\t! round packed2D" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vround2D_mem(legVecX dst, memory mem, immU8 rmode) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+ match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
+ format %{ "vroundpd $dst, $mem, $rmode\t! round packed2D" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vround4D_reg(legVecY dst, legVecY src, immU8 rmode) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+ match(Set dst (RoundDoubleModeV src rmode));
+ format %{ "vroundpd $dst, $src, $rmode\t! round packed4D" %}
+ ins_encode %{
+ int vector_len = 1;
+ __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vround4D_mem(legVecY dst, memory mem, immU8 rmode) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+ match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
+ format %{ "vroundpd $dst, $mem, $rmode\t! round packed4D" %}
+ ins_encode %{
+ int vector_len = 1;
+ __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+
+instruct vround8D_reg(vecZ dst, vecZ src, immU8 rmode) %{
+ predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
+ match(Set dst (RoundDoubleModeV src rmode));
+ format %{ "vrndscalepd $dst, $src, $rmode\t! round packed8D" %}
+ ins_encode %{
+ int vector_len = 2;
+ __ vrndscalepd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vround8D_mem(vecZ dst, memory mem, immU8 rmode) %{
+ predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
+ match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
+ format %{ "vrndscalepd $dst, $mem, $rmode\t! round packed8D" %}
+ ins_encode %{
+ int vector_len = 2;
+ __ vrndscalepd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+#endif // _LP64
+
instruct onspinwait() %{
match(OnSpinWait);
ins_cost(200);
@@ -3749,7 +3729,7 @@
%}
instruct Repl2F_zero(vecD dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
+ predicate(n->as_Vector()->length() == 2);
match(Set dst (ReplicateF zero));
format %{ "xorps $dst,$dst\t! replicate2F zero" %}
ins_encode %{
@@ -3759,7 +3739,7 @@
%}
instruct Repl4F_zero(vecX dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
+ predicate(n->as_Vector()->length() == 4);
match(Set dst (ReplicateF zero));
format %{ "xorps $dst,$dst\t! replicate4F zero" %}
ins_encode %{
@@ -3769,7 +3749,7 @@
%}
instruct Repl8F_zero(vecY dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 8 && UseAVX < 3);
+ predicate(n->as_Vector()->length() == 8 && UseAVX > 0);
match(Set dst (ReplicateF zero));
format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %}
ins_encode %{
@@ -3843,7 +3823,7 @@
// Replicate double (8 byte) scalar zero to be vector
instruct Repl2D_zero(vecX dst, immD0 zero) %{
- predicate(n->as_Vector()->length() == 2 && UseAVX < 3);
+ predicate(n->as_Vector()->length() == 2);
match(Set dst (ReplicateD zero));
format %{ "xorpd $dst,$dst\t! replicate2D zero" %}
ins_encode %{
@@ -3853,7 +3833,7 @@
%}
instruct Repl4D_zero(vecY dst, immD0 zero) %{
- predicate(n->as_Vector()->length() == 4 && UseAVX < 3);
+ predicate(n->as_Vector()->length() == 4 && UseAVX > 0);
match(Set dst (ReplicateD zero));
format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %}
ins_encode %{
@@ -4778,42 +4758,6 @@
ins_pipe( pipe_slow );
%}
-instruct Repl2F_zero_evex(vecD dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
- match(Set dst (ReplicateF zero));
- format %{ "vpxor $dst k0,$dst,$dst\t! replicate2F zero" %}
- ins_encode %{
- // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
- int vector_len = 2;
- __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
- %}
- ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl4F_zero_evex(vecX dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
- match(Set dst (ReplicateF zero));
- format %{ "vpxor $dst k0,$dst,$dst\t! replicate4F zero" %}
- ins_encode %{
- // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
- int vector_len = 2;
- __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
- %}
- ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl8F_zero_evex(vecY dst, immF0 zero) %{
- predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
- match(Set dst (ReplicateF zero));
- format %{ "vpxor $dst k0,$dst,$dst\t! replicate8F zero" %}
- ins_encode %{
- // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation
- int vector_len = 2;
- __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
- %}
- ins_pipe( fpu_reg_reg );
-%}
-
instruct Repl16F_zero_evex(vecZ dst, immF0 zero) %{
predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
match(Set dst (ReplicateF zero));
@@ -4870,30 +4814,6 @@
ins_pipe( pipe_slow );
%}
-instruct Repl2D_zero_evex(vecX dst, immD0 zero) %{
- predicate(n->as_Vector()->length() == 2 && UseAVX > 2);
- match(Set dst (ReplicateD zero));
- format %{ "vpxor $dst k0,$dst,$dst\t! replicate2D zero" %}
- ins_encode %{
- // Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation
- int vector_len = 2;
- __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
- %}
- ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl4D_zero_evex(vecY dst, immD0 zero) %{
- predicate(n->as_Vector()->length() == 4 && UseAVX > 2);
- match(Set dst (ReplicateD zero));
- format %{ "vpxor $dst k0,$dst,$dst\t! replicate4D zero" %}
- ins_encode %{
- // Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation
- int vector_len = 2;
- __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
- %}
- ins_pipe( fpu_reg_reg );
-%}
-
instruct Repl8D_zero_evex(vecZ dst, immD0 zero) %{
predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
match(Set dst (ReplicateD zero));
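
The RoundDoubleMode / RoundDoubleModeV rules added above lower to roundsd for scalars, vroundpd for 2- and 4-wide vectors, and vrndscalepd for 8-wide vectors. A compiler-intrinsics illustration of the packed shapes (assumes -mavx, plus AVX-512F for the 512-bit form; not HotSpot code):

#include <immintrin.h>

static __m256d floor4(__m256d v) {
  return _mm256_round_pd(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);   // vroundpd, 4 doubles
}

#ifdef __AVX512F__
static __m512d floor8(__m512d v) {
  // vrndscalepd: the low imm bits pick the rounding mode, the upper bits the scale (0 here).
  return _mm512_roundscale_pd(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
}
#endif

int main() {
  double in[4]  = { -1.1, 1.1, -2.5, 2.5 };
  double out[4] = { 0, 0, 0, 0 };
  _mm256_storeu_pd(out, floor4(_mm256_loadu_pd(in)));
  return (out[0] == -2.0 && out[3] == 2.0) ? 0 : 1;   // floor(-1.1) = -2, floor(2.5) = 2
}
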
--- a/src/hotspot/cpu/x86/x86_64.ad Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad Fri Oct 11 12:08:01 2019 +0530
@@ -1058,8 +1058,8 @@
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);
-static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
- int stack_offset, int reg, uint ireg, outputStream* st);
+int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
+ int stack_offset, int reg, uint ireg, outputStream* st);
static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
@@ -4260,200 +4260,6 @@
%}
%}
-// Operands for bound floating pointer register arguments
-operand rxmm0() %{
- constraint(ALLOC_IN_RC(xmm0_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm1() %{
- constraint(ALLOC_IN_RC(xmm1_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm2() %{
- constraint(ALLOC_IN_RC(xmm2_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm3() %{
- constraint(ALLOC_IN_RC(xmm3_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm4() %{
- constraint(ALLOC_IN_RC(xmm4_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm5() %{
- constraint(ALLOC_IN_RC(xmm5_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm6() %{
- constraint(ALLOC_IN_RC(xmm6_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm7() %{
- constraint(ALLOC_IN_RC(xmm7_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm8() %{
- constraint(ALLOC_IN_RC(xmm8_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm9() %{
- constraint(ALLOC_IN_RC(xmm9_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm10() %{
- constraint(ALLOC_IN_RC(xmm10_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm11() %{
- constraint(ALLOC_IN_RC(xmm11_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm12() %{
- constraint(ALLOC_IN_RC(xmm12_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm13() %{
- constraint(ALLOC_IN_RC(xmm13_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm14() %{
- constraint(ALLOC_IN_RC(xmm14_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm15() %{
- constraint(ALLOC_IN_RC(xmm15_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm16() %{
- constraint(ALLOC_IN_RC(xmm16_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm17() %{
- constraint(ALLOC_IN_RC(xmm17_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm18() %{
- constraint(ALLOC_IN_RC(xmm18_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm19() %{
- constraint(ALLOC_IN_RC(xmm19_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm20() %{
- constraint(ALLOC_IN_RC(xmm20_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm21() %{
- constraint(ALLOC_IN_RC(xmm21_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm22() %{
- constraint(ALLOC_IN_RC(xmm22_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm23() %{
- constraint(ALLOC_IN_RC(xmm23_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm24() %{
- constraint(ALLOC_IN_RC(xmm24_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm25() %{
- constraint(ALLOC_IN_RC(xmm25_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm26() %{
- constraint(ALLOC_IN_RC(xmm26_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm27() %{
- constraint(ALLOC_IN_RC(xmm27_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm28() %{
- constraint(ALLOC_IN_RC(xmm28_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm29() %{
- constraint(ALLOC_IN_RC(xmm29_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm30() %{
- constraint(ALLOC_IN_RC(xmm30_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-operand rxmm31() %{
- constraint(ALLOC_IN_RC(xmm31_reg));
- match(VecX);
- format%{%}
- interface(REG_INTER);
-%}
-
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify separate
@@ -5346,6 +5152,7 @@
instruct loadP(rRegP dst, memory mem)
%{
match(Set dst (LoadP mem));
+ predicate(n->as_Load()->barrier_data() == 0);
ins_cost(125); // XXX
format %{ "movq $dst, $mem\t# ptr" %}
@@ -7794,6 +7601,7 @@
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
+ predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
@@ -7845,7 +7653,7 @@
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
- predicate(VM_Version::supports_cx8());
+ predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
@@ -8087,7 +7895,7 @@
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
- predicate(VM_Version::supports_cx8());
+ predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
effect(KILL cr);
@@ -8232,6 +8040,7 @@
instruct xchgP( memory mem, rRegP newval) %{
match(Set newval (GetAndSetP mem newval));
+ predicate(n->as_LoadStore()->barrier_data() == 0);
format %{ "XCHGQ $newval,[$mem]" %}
ins_encode %{
__ xchgq($newval$$Register, $mem$$Address);
@@ -11974,6 +11783,7 @@
instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
%{
match(Set cr (CmpP op1 (LoadP op2)));
+ predicate(n->in(2)->as_Load()->barrier_data() == 0);
ins_cost(500); // XXX
format %{ "cmpq $op1, $op2\t# ptr" %}
@@ -11999,7 +11809,8 @@
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
- predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none);
+ predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none &&
+ n->in(2)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP op1 (LoadP op2)));
format %{ "cmpq $op1, $op2\t# raw ptr" %}
@@ -12024,7 +11835,8 @@
// any compare to a zero should be eq/neq.
instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
%{
- predicate(!UseCompressedOops || (CompressedOops::base() != NULL));
+ predicate((!UseCompressedOops || (CompressedOops::base() != NULL)) &&
+ n->in(1)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP (LoadP op) zero));
ins_cost(500); // XXX
@@ -12037,7 +11849,9 @@
instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{
- predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+ predicate(UseCompressedOops && (CompressedOops::base() == NULL) &&
+ (CompressedKlassPointers::base() == NULL) &&
+ n->in(1)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP (LoadP mem) zero));
format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
--- a/src/hotspot/os/aix/os_aix.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/os/aix/os_aix.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -132,18 +132,6 @@
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
// excerpts from systemcfg.h that might be missing on older os levels
-#ifndef PV_5_Compat
- #define PV_5_Compat 0x0F8000 /* Power PC 5 */
-#endif
-#ifndef PV_6
- #define PV_6 0x100000 /* Power PC 6 */
-#endif
-#ifndef PV_6_1
- #define PV_6_1 0x100001 /* Power PC 6 DD1.x */
-#endif
-#ifndef PV_6_Compat
- #define PV_6_Compat 0x108000 /* Power PC 6 */
-#endif
#ifndef PV_7
#define PV_7 0x200000 /* Power PC 7 */
#endif
@@ -156,6 +144,13 @@
#ifndef PV_8_Compat
#define PV_8_Compat 0x308000 /* Power PC 8 */
#endif
+#ifndef PV_9
+ #define PV_9 0x400000 /* Power PC 9 */
+#endif
+#ifndef PV_9_Compat
+ #define PV_9_Compat 0x408000 /* Power PC 9 */
+#endif
+
static address resolve_function_descriptor_to_code_pointer(address p);
@@ -1027,17 +1022,15 @@
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
- return (double)(os::elapsed_counter()) * 0.000001;
+ return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}
jlong os::elapsed_counter() {
- timeval time;
- int status = gettimeofday(&time, NULL);
- return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+ return javaTimeNanos() - initial_time_count;
}
jlong os::elapsed_frequency() {
- return (1000 * 1000);
+ return NANOSECS_PER_SEC; // nanosecond resolution
}
bool os::supports_vtime() { return true; }
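
On AIX the elapsed counter now comes from javaTimeNanos() rather than gettimeofday(), so elapsedTime() is just the counter divided by the frequency at nanosecond resolution. A sketch of the same scheme, with std::chrono standing in for the monotonic source:

#include <chrono>
#include <cstdint>
#include <cstdio>

static int64_t now_ns() {
  using namespace std::chrono;
  return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
}

static const int64_t initial_time_count = now_ns();   // captured once at startup
static const int64_t kNanosPerSec = 1000000000LL;     // stands in for NANOSECS_PER_SEC

static double elapsed_seconds() {
  return double(now_ns() - initial_time_count) / double(kNanosPerSec);
}

int main() {
  std::printf("%.9f\n", elapsed_seconds());
  return 0;
}
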
@@ -1388,15 +1381,7 @@
void os::print_os_info(outputStream* st) {
st->print("OS:");
- st->print("uname:");
- struct utsname name;
- uname(&name);
- st->print(name.sysname); st->print(" ");
- st->print(name.nodename); st->print(" ");
- st->print(name.release); st->print(" ");
- st->print(name.version); st->print(" ");
- st->print(name.machine);
- st->cr();
+ os::Posix::print_uname_info(st);
uint32_t ver = os::Aix::os_version();
st->print_cr("AIX kernel version %u.%u.%u.%u",
@@ -1404,16 +1389,12 @@
os::Posix::print_rlimit_info(st);
+ os::Posix::print_load_average(st);
+
// _SC_THREAD_THREADS_MAX is the maximum number of threads within a process.
long tmax = sysconf(_SC_THREAD_THREADS_MAX);
st->print_cr("maximum #threads within a process:%ld", tmax);
- // load average
- st->print("load average:");
- double loadavg[3] = {-1.L, -1.L, -1.L};
- os::loadavg(loadavg, 3);
- st->print_cr("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
-
// print wpar info
libperfstat::wparinfo_t wi;
if (libperfstat::get_wparinfo(&wi)) {
@@ -1506,6 +1487,9 @@
void os::get_summary_cpu_info(char* buf, size_t buflen) {
// read _system_configuration.version
switch (_system_configuration.version) {
+ case PV_9:
+ strncpy(buf, "Power PC 9", buflen);
+ break;
case PV_8:
strncpy(buf, "Power PC 8", buflen);
break;
@@ -1539,6 +1523,9 @@
case PV_8_Compat:
strncpy(buf, "PV_8_Compat", buflen);
break;
+ case PV_9_Compat:
+ strncpy(buf, "PV_9_Compat", buflen);
+ break;
default:
strncpy(buf, "unknown", buflen);
}
@@ -3498,7 +3485,7 @@
// _main_thread points to the thread that created/loaded the JVM.
Aix::_main_thread = pthread_self();
- initial_time_count = os::elapsed_counter();
+ initial_time_count = javaTimeNanos();
os::Posix::init();
}
--- a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -43,7 +43,7 @@
if ((uintptr_t)res != start) {
// Failed to reserve memory at the requested address
- unmap(start, size);
+ unmap((uintptr_t)res, size);
return false;
}
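
The ZGC fix above unmaps the address mmap actually returned rather than the requested start when the placement differs. A self-contained sketch of the corrected pattern, with Linux-style flags standing in for the real reservation code:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool reserve_at(uintptr_t start, size_t size) {
  void* res = mmap((void*)start, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    return false;                 // nothing was mapped, nothing to release
  }
  if ((uintptr_t)res != start) {
    munmap(res, size);            // release the mapping that exists, not the requested start
    return false;
  }
  return true;
}

int main() {
  // Requesting address 0 makes the "placed elsewhere" branch all but certain.
  std::printf("%d\n", (int)reserve_at(0, 1 << 20));
  return 0;
}
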
--- a/src/hotspot/os/posix/os_posix.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/os/posix/os_posix.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -373,8 +373,12 @@
void os::Posix::print_load_average(outputStream* st) {
st->print("load average:");
double loadavg[3];
- os::loadavg(loadavg, 3);
- st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
+ int res = os::loadavg(loadavg, 3);
+ if (res != -1) {
+ st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
+ } else {
+ st->print(" Unavailable");
+ }
st->cr();
}
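
print_load_average() now tolerates platforms where os::loadavg() fails. A small stand-alone equivalent using getloadavg(), which is assumed to be available on Linux/BSD-style systems:

#include <cstdio>
#include <cstdlib>   // getloadavg() on glibc/BSD; not strictly POSIX

static void print_load_average(std::FILE* out) {
  double loadavg[3];
  int res = getloadavg(loadavg, 3);
  if (res != -1) {
    std::fprintf(out, "load average: %0.02f %0.02f %0.02f\n",
                 loadavg[0], loadavg[1], loadavg[2]);
  } else {
    std::fprintf(out, "load average: Unavailable\n");   // mirrors the new fallback
  }
}

int main() {
  print_load_average(stdout);
  return 0;
}
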
--- a/src/hotspot/os/windows/os_windows.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/os/windows/os_windows.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -4159,128 +4159,135 @@
}
}
-// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
-// Creates an UNC path from a single byte path. Return buffer is
-// allocated in C heap and needs to be freed by the caller.
-// Returns NULL on error.
-static wchar_t* create_unc_path(const char* path, errno_t &err) {
- wchar_t* wpath = NULL;
- size_t converted_chars = 0;
- size_t path_len = strlen(path) + 1; // includes the terminating NULL
- if (path[0] == '\\' && path[1] == '\\') {
- if (path[2] == '?' && path[3] == '\\'){
- // if it already has a \\?\ don't do the prefix
- wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
- if (wpath != NULL) {
- err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
+// Returns the given path as an absolute wide path in UNC format. The returned path is NULL
+// on error (with err being set accordingly) and should be freed via os::free() otherwise.
+// additional_space is the number of additionally allocated wchars after the terminating L'\0'.
+// This is based on pathToNTPath() in io_util_md.cpp, but omits the optimizations for
+// short paths.
+static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
+ if ((path == NULL) || (path[0] == '\0')) {
+ err = ENOENT;
+ return NULL;
+ }
+
+ size_t path_len = strlen(path);
+ // Need to allocate at least room for 3 characters, since os::native_path transforms "C:" to "C:.".
+ char* buf = (char*) os::malloc(1 + MAX2((size_t) 3, path_len), mtInternal);
+ wchar_t* result = NULL;
+
+ if (buf == NULL) {
+ err = ENOMEM;
+ } else {
+ memcpy(buf, path, path_len + 1);
+ os::native_path(buf);
+
+ wchar_t* prefix;
+ int prefix_off = 0;
+ bool is_abs = true;
+ bool needs_fullpath = true;
+
+ if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
+ prefix = L"\\\\?\\";
+ } else if (buf[0] == '\\' && buf[1] == '\\') {
+ if (buf[2] == '?' && buf[3] == '\\') {
+ prefix = L"";
+ needs_fullpath = false;
} else {
- err = ENOMEM;
+ prefix = L"\\\\?\\UNC";
+ prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
}
} else {
- // only UNC pathname includes double slashes here
- wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
- if (wpath != NULL) {
- ::wcscpy(wpath, L"\\\\?\\UNC\0");
- err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
- } else {
- err = ENOMEM;
+ is_abs = false;
+ prefix = L"\\\\?\\";
+ }
+
+ size_t buf_len = strlen(buf);
+ size_t prefix_len = wcslen(prefix);
+ size_t full_path_size = is_abs ? 1 + buf_len : JVM_MAXPATHLEN;
+ size_t result_size = prefix_len + full_path_size - prefix_off;
+ result = (wchar_t*) os::malloc(sizeof(wchar_t) * (additional_space + result_size), mtInternal);
+
+ if (result == NULL) {
+ err = ENOMEM;
+ } else {
+ size_t converted_chars;
+ wchar_t* path_start = result + prefix_len - prefix_off;
+ err = ::mbstowcs_s(&converted_chars, path_start, buf_len + 1, buf, buf_len);
+
+ if ((err == ERROR_SUCCESS) && needs_fullpath) {
+ wchar_t* tmp = (wchar_t*) os::malloc(sizeof(wchar_t) * full_path_size, mtInternal);
+
+ if (tmp == NULL) {
+ err = ENOMEM;
+ } else {
+ if (!_wfullpath(tmp, path_start, full_path_size)) {
+ err = ENOENT;
+ } else {
+ ::memcpy(path_start, tmp, (1 + wcslen(tmp)) * sizeof(wchar_t));
+ }
+
+ os::free(tmp);
+ }
+ }
+
+ memcpy(result, prefix, sizeof(wchar_t) * prefix_len);
+
+ // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
+ size_t result_len = wcslen(result);
+
+ if (result[result_len - 1] == L'\\') {
+ if (!(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
+ result[result_len - 1] = L'\0';
+ }
}
}
- } else {
- wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
- if (wpath != NULL) {
- ::wcscpy(wpath, L"\\\\?\\\0");
- err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
- } else {
- err = ENOMEM;
- }
- }
- return wpath;
-}
-
-static void destroy_unc_path(wchar_t* wpath) {
- os::free(wpath);
+ }
+
+ os::free(buf);
+
+ if (err != ERROR_SUCCESS) {
+ os::free(result);
+ result = NULL;
+ }
+
+ return result;
}
int os::stat(const char *path, struct stat *sbuf) {
- char* pathbuf = (char*)os::strdup(path, mtInternal);
- if (pathbuf == NULL) {
- errno = ENOMEM;
+ errno_t err;
+ wchar_t* wide_path = wide_abs_unc_path(path, err);
+
+ if (wide_path == NULL) {
+ errno = err;
return -1;
}
- os::native_path(pathbuf);
- int ret;
- WIN32_FILE_ATTRIBUTE_DATA file_data;
- // Not using stat() to avoid the problem described in JDK-6539723
- if (strlen(path) < MAX_PATH) {
- BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
- if (!bret) {
- errno = ::GetLastError();
- ret = -1;
- }
- else {
- file_attribute_data_to_stat(sbuf, file_data);
- ret = 0;
- }
- } else {
- errno_t err = ERROR_SUCCESS;
- wchar_t* wpath = create_unc_path(pathbuf, err);
- if (err != ERROR_SUCCESS) {
- if (wpath != NULL) {
- destroy_unc_path(wpath);
- }
- os::free(pathbuf);
- errno = err;
- return -1;
- }
- BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
- if (!bret) {
- errno = ::GetLastError();
- ret = -1;
- } else {
- file_attribute_data_to_stat(sbuf, file_data);
- ret = 0;
- }
- destroy_unc_path(wpath);
- }
- os::free(pathbuf);
- return ret;
+
+ WIN32_FILE_ATTRIBUTE_DATA file_data;
+ BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
+ os::free(wide_path);
+
+ if (!bret) {
+ errno = ::GetLastError();
+ return -1;
+ }
+
+ file_attribute_data_to_stat(sbuf, file_data);
+ return 0;
}
static HANDLE create_read_only_file_handle(const char* file) {
- if (file == NULL) {
- return INVALID_HANDLE_VALUE;
- }
-
- char* nativepath = (char*)os::strdup(file, mtInternal);
- if (nativepath == NULL) {
- errno = ENOMEM;
+ errno_t err;
+ wchar_t* wide_path = wide_abs_unc_path(file, err);
+
+ if (wide_path == NULL) {
+ errno = err;
return INVALID_HANDLE_VALUE;
}
- os::native_path(nativepath);
-
- size_t len = strlen(nativepath);
- HANDLE handle = INVALID_HANDLE_VALUE;
-
- if (len < MAX_PATH) {
- handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
- NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
- } else {
- errno_t err = ERROR_SUCCESS;
- wchar_t* wfile = create_unc_path(nativepath, err);
- if (err != ERROR_SUCCESS) {
- if (wfile != NULL) {
- destroy_unc_path(wfile);
- }
- os::free(nativepath);
- return INVALID_HANDLE_VALUE;
- }
- handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
- NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
- destroy_unc_path(wfile);
- }
-
- os::free(nativepath);
+
+ HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
+ NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ os::free(wide_path);
+
return handle;
}
@@ -4329,7 +4336,6 @@
return result;
}
-
#define FT2INT64(ft) \
((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
@@ -4434,38 +4440,22 @@
return DontYieldALot;
}
-// This method is a slightly reworked copy of JDK's sysOpen
-// from src/windows/hpi/src/sys_api_md.c
-
int os::open(const char *path, int oflag, int mode) {
- char* pathbuf = (char*)os::strdup(path, mtInternal);
- if (pathbuf == NULL) {
- errno = ENOMEM;
+ errno_t err;
+ wchar_t* wide_path = wide_abs_unc_path(path, err);
+
+ if (wide_path == NULL) {
+ errno = err;
return -1;
}
- os::native_path(pathbuf);
- int ret;
- if (strlen(path) < MAX_PATH) {
- ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
- } else {
- errno_t err = ERROR_SUCCESS;
- wchar_t* wpath = create_unc_path(pathbuf, err);
- if (err != ERROR_SUCCESS) {
- if (wpath != NULL) {
- destroy_unc_path(wpath);
- }
- os::free(pathbuf);
- errno = err;
- return -1;
- }
- ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
- if (ret == -1) {
- errno = ::GetLastError();
- }
- destroy_unc_path(wpath);
- }
- os::free(pathbuf);
- return ret;
+ int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
+ os::free(wide_path);
+
+ if (fd == -1) {
+ errno = ::GetLastError();
+ }
+
+ return fd;
}
FILE* os::open(int fd, const char* mode) {
@@ -4474,37 +4464,26 @@
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
- char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
- if (search_path == NULL) {
- errno = ENOMEM;
- return false;
- }
- strcpy(search_path, path);
- os::native_path(search_path);
- // Append "*", or possibly "\\*", to path
- if (search_path[1] == ':' &&
- (search_path[2] == '\0' ||
- (search_path[2] == '\\' && search_path[3] == '\0'))) {
- // No '\\' needed for cases like "Z:" or "Z:\"
- strcat(search_path, "*");
- }
- else {
- strcat(search_path, "\\*");
- }
- errno_t err = ERROR_SUCCESS;
- wchar_t* wpath = create_unc_path(search_path, err);
- if (err != ERROR_SUCCESS) {
- if (wpath != NULL) {
- destroy_unc_path(wpath);
- }
- os::free(search_path);
+ errno_t err;
+ wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
+
+ if (wide_path == NULL) {
errno = err;
return false;
}
+
+ // Make sure we end with "\\*"
+ if (wide_path[wcslen(wide_path) - 1] == L'\\') {
+ wcscat(wide_path, L"*");
+ } else {
+ wcscat(wide_path, L"\\*");
+ }
+
WIN32_FIND_DATAW fd;
- HANDLE f = ::FindFirstFileW(wpath, &fd);
- destroy_unc_path(wpath);
+ HANDLE f = ::FindFirstFileW(wide_path, &fd);
+ os::free(wide_path);
bool is_empty = true;
+
if (f != INVALID_HANDLE_VALUE) {
while (is_empty && ::FindNextFileW(f, &fd)) {
// An empty directory contains only the current directory file
@@ -4515,8 +4494,10 @@
}
}
FindClose(f);
- }
- os::free(search_path);
+ } else {
+ errno = ::GetLastError();
+ }
+
return is_empty;
}
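
wide_abs_unc_path() replaces the old create_unc_path()/MAX_PATH special-casing with one helper that always yields an absolute \\?\ path. A rough sketch of its prefix selection only (the real helper also rejects DBCS lead bytes and resolves relative paths with _wfullpath()):

#include <cctype>
#include <cstdio>
#include <string>

static std::wstring ntpath_prefix_for(const std::string& p, bool& needs_fullpath) {
  needs_fullpath = true;
  if (p.size() >= 3 && std::isalpha((unsigned char)p[0]) && p[1] == ':' && p[2] == '\\') {
    return L"\\\\?\\";            // drive-absolute: gets the plain long-path prefix
  }
  if (p.size() >= 2 && p[0] == '\\' && p[1] == '\\') {
    if (p.size() >= 4 && p[2] == '?' && p[3] == '\\') {
      needs_fullpath = false;     // already a long path, pass it through untouched
      return L"";
    }
    return L"\\\\?\\UNC";         // UNC share: one leading backslash is overwritten
  }
  return L"\\\\?\\";              // relative path: made absolute first, then prefixed
}

int main() {
  bool full;
  std::wstring prefix = ntpath_prefix_for("C:\\tmp\\x", full);
  std::printf("%zu %d\n", prefix.size(), (int)full);   // 4 1
  return 0;
}
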
--- a/src/hotspot/share/adlc/formssel.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -773,11 +773,6 @@
!strcmp(_matrule->_rChild->_opType,"CheckCastPP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
-#if INCLUDE_ZGC
- !strcmp(_matrule->_rChild->_opType,"ZGetAndSetP") ||
- !strcmp(_matrule->_rChild->_opType,"ZCompareAndExchangeP") ||
- !strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") ||
-#endif
#if INCLUDE_SHENANDOAHGC
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
@@ -3510,9 +3505,6 @@
"StoreCM",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
-#if INCLUDE_ZGC
- "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
-#endif
"ClearArray"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
@@ -4047,6 +4039,7 @@
strcmp(opType,"FmaD") == 0 ||
strcmp(opType,"FmaF") == 0 ||
strcmp(opType,"RoundDouble")==0 ||
+ strcmp(opType,"RoundDoubleMode")==0 ||
strcmp(opType,"RoundFloat")==0 ||
strcmp(opType,"ReverseBytesI")==0 ||
strcmp(opType,"ReverseBytesL")==0 ||
@@ -4175,7 +4168,7 @@
"URShiftVB","URShiftVS","URShiftVI","URShiftVL",
"MaxReductionV", "MinReductionV",
"ReplicateB","ReplicateS","ReplicateI","ReplicateL","ReplicateF","ReplicateD",
- "LoadVector","StoreVector",
+ "RoundDoubleModeV","LoadVector","StoreVector",
"FmaVD", "FmaVF","PopCountVI",
// Next are not supported currently.
"PackB","PackS","PackI","PackL","PackF","PackD","Pack2L","Pack2D",
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -37,6 +37,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -32,6 +32,7 @@
#include "compiler/compilerOracle.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -33,13 +33,13 @@
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciUtilities.inline.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -36,6 +36,7 @@
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/c1/c1_Runtime1.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -37,6 +37,7 @@
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
@@ -55,7 +56,6 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/ci/ciEnv.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/ci/ciEnv.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -154,6 +154,7 @@
_the_null_string = NULL;
_the_min_jint_string = NULL;
+ _jvmti_redefinition_count = 0;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
@@ -209,6 +210,7 @@
_the_null_string = NULL;
_the_min_jint_string = NULL;
+ _jvmti_redefinition_count = 0;
_jvmti_can_hotswap_or_post_breakpoint = false;
_jvmti_can_access_local_variables = false;
_jvmti_can_post_on_exceptions = false;
@@ -231,13 +233,20 @@
VM_ENTRY_MARK;
// Get Jvmti capabilities under lock to get consistent values.
MutexLocker mu(JvmtiThreadState_lock);
+ _jvmti_redefinition_count = JvmtiExport::redefinition_count();
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
_jvmti_can_pop_frame = JvmtiExport::can_pop_frame();
+ _jvmti_can_get_owned_monitor_info = JvmtiExport::can_get_owned_monitor_info();
}
bool ciEnv::jvmti_state_changed() const {
+ // Some classes were redefined
+ if (_jvmti_redefinition_count != JvmtiExport::redefinition_count()) {
+ return true;
+ }
+
if (!_jvmti_can_access_local_variables &&
JvmtiExport::can_access_local_variables()) {
return true;
@@ -254,6 +263,11 @@
JvmtiExport::can_pop_frame()) {
return true;
}
+ if (!_jvmti_can_get_owned_monitor_info &&
+ JvmtiExport::can_get_owned_monitor_info()) {
+ return true;
+ }
+
return false;
}
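
ciEnv now snapshots the JVMTI redefinition count and the can_get_owned_monitor_info capability at compile start and treats any change as a reason to invalidate the compile. A compact sketch of that comparison, with made-up types standing in for the ciEnv fields:

#include <cstdint>
#include <cstdio>

struct JvmtiSnapshot {
  uint64_t redefinition_count;
  bool     can_get_owned_monitor_info;
};

static bool jvmti_state_changed(const JvmtiSnapshot& cached,
                                uint64_t live_count, bool live_monitor_info) {
  if (cached.redefinition_count != live_count) {
    return true;                                   // some classes were redefined
  }
  if (!cached.can_get_owned_monitor_info && live_monitor_info) {
    return true;                                   // a capability was enabled mid-compile
  }
  return false;
}

int main() {
  JvmtiSnapshot at_compile_start = { 3, false };
  std::printf("%d\n", jvmti_state_changed(at_compile_start, 4, false));   // 1
  return 0;
}
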
--- a/src/hotspot/share/ci/ciEnv.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/ci/ciEnv.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -68,10 +68,12 @@
int _name_buffer_len;
// Cache Jvmti state
+ uint64_t _jvmti_redefinition_count;
bool _jvmti_can_hotswap_or_post_breakpoint;
bool _jvmti_can_access_local_variables;
bool _jvmti_can_post_on_exceptions;
bool _jvmti_can_pop_frame;
+ bool _jvmti_can_get_owned_monitor_info; // includes can_get_owned_monitor_stack_depth_info
// Cache DTrace flags
bool _dtrace_extended_probes;
@@ -346,6 +348,7 @@
}
bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; }
+ bool jvmti_can_get_owned_monitor_info() const { return _jvmti_can_get_owned_monitor_info; }
// Cache DTrace flags
void cache_dtrace_flags();
--- a/src/hotspot/share/classfile/classFileParser.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classFileParser.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -3004,7 +3004,7 @@
// We temporarily use the vtable_index field in the Method* to store the
// class file index, so we can read in after calling qsort.
// Put the method ordering in the shared archive.
- if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
+ if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
for (int index = 0; index < length; index++) {
Method* const m = methods->at(index);
assert(!m->valid_vtable_index(), "vtable index should not be set");
@@ -3018,7 +3018,7 @@
intArray* method_ordering = NULL;
// If JVMTI original method ordering or sharing is enabled construct int
// array remembering the original ordering
- if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
+ if (JvmtiExport::can_maintain_original_method_order() || Arguments::is_dumping_archive()) {
method_ordering = new intArray(length, length, -1);
for (int index = 0; index < length; index++) {
Method* const m = methods->at(index);
--- a/src/hotspot/share/classfile/classLoader.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classLoader.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -57,7 +57,6 @@
#include "oops/symbol.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -462,7 +461,7 @@
#if INCLUDE_CDS
void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only called at dump time");
+ Arguments::assert_is_dumping_archive();
tty->print_cr("Hint: enable -Xlog:class+path=info to diagnose the failure");
vm_exit_during_initialization(error, message);
}
@@ -532,7 +531,7 @@
#if INCLUDE_CDS
void ClassLoader::setup_app_search_path(const char *class_path) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Sanity");
+ Arguments::assert_is_dumping_archive();
ResourceMark rm;
ClasspathStream cp_stream(class_path);
@@ -546,7 +545,7 @@
void ClassLoader::add_to_module_path_entries(const char* path,
ClassPathEntry* entry) {
assert(entry != NULL, "ClassPathEntry should not be NULL");
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
// The entry does not exist, add to the list
if (_module_path_entries == NULL) {
@@ -560,7 +559,7 @@
// Add a module path to the _module_path_entries list.
void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
struct stat st;
if (os::stat(path, &st) != 0) {
tty->print_cr("os::stat error %d (%s). CDS dump aborted (path was \"%s\").",
@@ -656,7 +655,7 @@
bool set_base_piece = true;
#if INCLUDE_CDS
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
if (!Arguments::has_jimage()) {
vm_exit_during_initialization("CDS is not supported in exploded JDK build", NULL);
}
@@ -1360,7 +1359,7 @@
// Record the shared classpath index and loader type for classes loaded
// by the builtin loaders at dump time.
void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream, TRAPS) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "sanity");
+ Arguments::assert_is_dumping_archive();
assert(stream != NULL, "sanity");
if (ik->is_unsafe_anonymous()) {
@@ -1537,13 +1536,13 @@
#if INCLUDE_CDS
void ClassLoader::initialize_shared_path() {
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
ClassLoaderExt::setup_search_paths();
}
}
void ClassLoader::initialize_module_path(TRAPS) {
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
ClassLoaderExt::setup_module_paths(THREAD);
FileMapInfo::allocate_shared_path_table();
}
--- a/src/hotspot/share/classfile/classLoader.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classLoader.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#define SHARE_CLASSFILE_CLASSLOADER_HPP
#include "jimage.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
@@ -236,6 +237,8 @@
CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;})
CDS_ONLY(static ClassPathEntry* module_path_entries() {return _module_path_entries;})
+ static bool has_bootclasspath_append() { return _first_append_entry != NULL; }
+
protected:
// Initialization:
// - setup the boot loader's system class path
@@ -395,8 +398,7 @@
// Helper function used by CDS code to get the number of module path
// entries during shared classpath setup time.
static int num_module_path_entries() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "Should only be called at CDS dump time");
+ Arguments::assert_is_dumping_archive();
int num_entries = 0;
ClassPathEntry* e= ClassLoader::_module_path_entries;
while (e != NULL) {
--- a/src/hotspot/share/classfile/classLoader.inline.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -62,8 +62,7 @@
// entries during shared classpath setup time.
inline int ClassLoader::num_boot_classpath_entries() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "Should only be called at CDS dump time");
+ Arguments::assert_is_dumping_archive();
assert(has_jrt_entry(), "must have a java runtime image");
int num_entries = 1; // count the runtime image
ClassPathEntry* e = ClassLoader::_first_append_entry;
@@ -85,8 +84,7 @@
// Helper function used by CDS code to get the number of app classpath
// entries during shared classpath setup time.
inline int ClassLoader::num_app_classpath_entries() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "Should only be called at CDS dump time");
+ Arguments::assert_is_dumping_archive();
int num_entries = 0;
ClassPathEntry* e= ClassLoader::_app_classpath_entries;
while (e != NULL) {
--- a/src/hotspot/share/classfile/classLoaderData.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -129,8 +129,8 @@
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :
_metaspace(NULL),
- _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
- Monitor::_safepoint_check_never)),
+ _metaspace_lock(new Mutex(Mutex::leaf+1, "Metaspace allocation lock", true,
+ Mutex::_safepoint_check_never)),
_unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous),
_modified_oops(true), _accumulated_modified_oops(false),
// An unsafe anonymous class loader data doesn't have anything to keep
--- a/src/hotspot/share/classfile/classLoaderExt.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -62,8 +62,7 @@
}
void ClassLoaderExt::setup_app_search_path() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "this function is only used at CDS dump time");
+ Arguments::assert_is_dumping_archive();
_app_class_paths_start_index = ClassLoader::num_boot_classpath_entries();
char* app_class_path = os::strdup(Arguments::get_appclasspath());
@@ -92,8 +91,7 @@
}
}
void ClassLoaderExt::setup_module_paths(TRAPS) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "this function is only used with CDS dump time");
+ Arguments::assert_is_dumping_archive();
_app_module_paths_start_index = ClassLoader::num_boot_classpath_entries() +
ClassLoader::num_app_classpath_entries();
Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
@@ -231,7 +229,7 @@
void ClassLoaderExt::record_result(const s2 classpath_index,
InstanceKlass* result,
TRAPS) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Sanity");
+ Arguments::assert_is_dumping_archive();
// We need to remember where the class comes from during dumping.
oop loader = result->class_loader();
--- a/src/hotspot/share/classfile/compactHashtable.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/compactHashtable.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -42,7 +42,7 @@
//
CompactHashtableWriter::CompactHashtableWriter(int num_entries,
CompactHashtableStats* stats) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
+ Arguments::assert_is_dumping_archive();
assert(num_entries >= 0, "sanity");
_num_buckets = calculate_num_buckets(num_entries);
assert(_num_buckets > 0, "no buckets");
--- a/src/hotspot/share/classfile/dictionary.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/dictionary.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -246,7 +246,7 @@
// Used to scan and relocate the classes during CDS archive dump.
void Dictionary::classes_do(MetaspaceClosure* it) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
+ Arguments::assert_is_dumping_archive();
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
--- a/src/hotspot/share/classfile/javaClasses.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/javaClasses.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -377,7 +377,7 @@
if (_to_java_string_fn == NULL) {
void *lib_handle = os::native_java_library();
- _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "NewStringPlatform"));
+ _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "JNU_NewStringPlatform"));
if (_to_java_string_fn == NULL) {
fatal("NewStringPlatform missing");
}
--- a/src/hotspot/share/classfile/javaClasses.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/javaClasses.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -202,7 +202,6 @@
// Conversion between '.' and '/' formats
static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
- static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }
// Conversion
static Symbol* as_symbol(oop java_string);
--- a/src/hotspot/share/classfile/klassFactory.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/klassFactory.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -218,7 +218,7 @@
JFR_ONLY(ON_KLASS_CREATION(result, parser, THREAD);)
#if INCLUDE_CDS
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
ClassLoader::record_result(result, stream, THREAD);
}
#endif // INCLUDE_CDS
--- a/src/hotspot/share/classfile/symbolTable.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/symbolTable.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -220,7 +220,7 @@
assert (len <= Symbol::max_length(), "should be checked by caller");
Symbol* sym;
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
c_heap = false;
}
if (c_heap) {
@@ -283,7 +283,7 @@
};
void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "called only during dump time");
+ Arguments::assert_is_dumping_archive();
MetaspacePointersDo mpd(it);
_local_table->do_safepoint_scan(mpd);
}
--- a/src/hotspot/share/classfile/systemDictionary.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1205,10 +1205,8 @@
TempNewSymbol pkg_name = NULL;
PackageEntry* pkg_entry = NULL;
ModuleEntry* mod_entry = NULL;
- const char* pkg_string = NULL;
pkg_name = InstanceKlass::package_from_name(class_name, CHECK_false);
if (pkg_name != NULL) {
- pkg_string = pkg_name->as_C_string();
if (loader_data != NULL) {
pkg_entry = loader_data->packages()->lookup_only(pkg_name);
}
@@ -1245,7 +1243,7 @@
      // 3. or, the class is from an unnamed module
if (!ent->is_modules_image() && ik->is_shared_boot_class()) {
// the class is from the -Xbootclasspath/a
- if (pkg_string == NULL ||
+ if (pkg_name == NULL ||
pkg_entry == NULL ||
pkg_entry->in_unnamed_module()) {
assert(mod_entry == NULL ||
@@ -1257,8 +1255,7 @@
return false;
} else {
bool res = SystemDictionaryShared::is_shared_class_visible_for_classloader(
- ik, class_loader, pkg_string, pkg_name,
- pkg_entry, mod_entry, CHECK_(false));
+ ik, class_loader, pkg_name, pkg_entry, mod_entry, CHECK_(false));
return res;
}
}
@@ -1432,6 +1429,11 @@
// a named package within the unnamed module. In all cases,
// limit visibility to search for the class only in the boot
// loader's append path.
+ if (!ClassLoader::has_bootclasspath_append()) {
+ // If there is no bootclasspath append entry, no need to continue
+ // searching.
+ return NULL;
+ }
search_only_bootloader_append = true;
}
}
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -657,7 +657,6 @@
bool SystemDictionaryShared::is_shared_class_visible_for_classloader(
InstanceKlass* ik,
Handle class_loader,
- const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,
@@ -684,7 +683,7 @@
}
} else if (SystemDictionary::is_system_class_loader(class_loader())) {
assert(ent != NULL, "shared class for system loader should have valid SharedClassPathEntry");
- if (pkg_string == NULL) {
+ if (pkg_name == NULL) {
// The archived class is in the unnamed package. Currently, the boot image
// does not contain any class in the unnamed package.
assert(!ent->is_modules_image(), "Class in the unnamed package must be from the classpath");
@@ -906,14 +905,9 @@
return NULL;
}
- const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, class_name);
+ const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, &_dynamic_unregistered_dictionary, class_name);
if (record == NULL) {
- if (DynamicArchive::is_mapped()) {
- record = find_record(&_dynamic_unregistered_dictionary, class_name);
- }
- if (record == NULL) {
- return NULL;
- }
+ return NULL;
}
int clsfile_size = cfs->length();
@@ -1029,7 +1023,7 @@
}
void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only when dumping");
+ Arguments::assert_is_dumping_archive();
assert(!is_builtin(k), "must be unregistered class");
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->_clsfile_size = cfs->length();
@@ -1185,7 +1179,7 @@
bool SystemDictionaryShared::is_excluded_class(InstanceKlass* k) {
assert(_no_class_loading_should_happen, "sanity");
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only when dumping");
+ Arguments::assert_is_dumping_archive();
return find_or_allocate_info_for(k)->is_excluded();
}
@@ -1209,7 +1203,7 @@
bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "called at dump time only");
+ Arguments::assert_is_dumping_archive();
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->add_verification_constraint(k, name, from_name, from_field_is_protected,
from_is_array, from_is_object);
@@ -1413,29 +1407,34 @@
}
const RunTimeSharedClassInfo*
-SystemDictionaryShared::find_record(RunTimeSharedDictionary* dict, Symbol* name) {
- if (UseSharedSpaces) {
- unsigned int hash = primitive_hash<Symbol*>(name);
- return dict->lookup(name, hash, 0);
- } else {
+SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTimeSharedDictionary* dynamic_dict, Symbol* name) {
+ if (!UseSharedSpaces || !name->is_shared()) {
+    // The name of every shared class must also be a shared Symbol.
return NULL;
}
+
+ unsigned int hash = primitive_hash<Symbol*>(name);
+ const RunTimeSharedClassInfo* record = NULL;
+ if (!MetaspaceShared::is_shared_dynamic(name)) {
+    // The name of every class in the static dictionary is also in the static
+    // archive, so skip the static lookup if the Symbol is dynamically archived.
+ record = static_dict->lookup(name, hash, 0);
+ }
+
+ if (record == NULL && DynamicArchive::is_mapped()) {
+ record = dynamic_dict->lookup(name, hash, 0);
+ }
+
+ return record;
}
InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) {
- const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, name);
- if (record) {
+ const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, &_dynamic_builtin_dictionary, name);
+ if (record != NULL) {
return record->_klass;
+ } else {
+ return NULL;
}
-
- if (DynamicArchive::is_mapped()) {
- record = find_record(&_dynamic_builtin_dictionary, name);
- if (record) {
- return record->_klass;
- }
- }
-
- return NULL;
}
void SystemDictionaryShared::update_shared_entry(InstanceKlass* k, int id) {
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -223,7 +223,9 @@
public:
static InstanceKlass* find_builtin_class(Symbol* class_name);
- static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* dict, Symbol* name);
+ static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* static_dict,
+ RunTimeSharedDictionary* dynamic_dict,
+ Symbol* name);
static bool has_platform_or_app_classes();
@@ -240,7 +242,6 @@
static bool is_sharing_possible(ClassLoaderData* loader_data);
static bool is_shared_class_visible_for_classloader(InstanceKlass* ik,
Handle class_loader,
- const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,
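
For reference, a minimal standalone sketch of the lookup order the merged find_record() now follows, using std::unordered_map as a stand-in for the two RunTimeSharedDictionary tables; the fast path that skips the static dictionary for dynamically archived Symbols is omitted here for brevity:

#include <string>
#include <unordered_map>

struct Record { int klass_id; };

static std::unordered_map<std::string, Record> static_dict;   // static archive entries
static std::unordered_map<std::string, Record> dynamic_dict;  // dynamic archive entries
static bool dynamic_archive_mapped = false;                   // stand-in for DynamicArchive::is_mapped()

static const Record* find_record(const std::string& name) {
  auto it = static_dict.find(name);
  if (it != static_dict.end()) {
    return &it->second;              // found in the static archive
  }
  if (dynamic_archive_mapped) {
    auto dit = dynamic_dict.find(name);
    if (dit != dynamic_dict.end()) {
      return &dit->second;           // fall back to the dynamic archive
    }
  }
  return nullptr;                    // caller treats NULL as "not archived"
}
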
--- a/src/hotspot/share/classfile/verificationType.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/verificationType.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -28,6 +28,7 @@
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "logging/log.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/handles.inline.hpp"
VerificationType VerificationType::from_tag(u1 tag) {
@@ -94,7 +95,7 @@
return true;
}
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
if (SystemDictionaryShared::add_verification_constraint(klass,
name(), from.name(), from_field_is_protected, from.is_array(),
from.is_object())) {
--- a/src/hotspot/share/classfile/verifier.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/verifier.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -63,29 +63,39 @@
#define STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION 52
#define MAX_ARRAY_DIMENSIONS 255
-// Access to external entry for VerifyClassCodes - old byte code verifier
+// Access to external entry for VerifyClassForMajorVersion - old byte code verifier
extern "C" {
- typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint);
- typedef jboolean (*verify_byte_codes_fn_new_t)(JNIEnv *, jclass, char *, jint, jint);
+ typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint, jint);
}
-static void* volatile _verify_byte_codes_fn = NULL;
+static verify_byte_codes_fn_t volatile _verify_byte_codes_fn = NULL;
+
+static verify_byte_codes_fn_t verify_byte_codes_fn() {
-static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
+ if (_verify_byte_codes_fn != NULL)
+ return _verify_byte_codes_fn;
+
+ MutexLocker locker(Verify_lock);
+
+ if (_verify_byte_codes_fn != NULL)
+ return _verify_byte_codes_fn;
-static void* verify_byte_codes_fn() {
- if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
- void *lib_handle = os::native_java_library();
- void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
- OrderAccess::release_store(&_verify_byte_codes_fn, func);
- if (func == NULL) {
- _is_new_verify_byte_codes_fn = false;
- func = os::dll_lookup(lib_handle, "VerifyClassCodes");
- OrderAccess::release_store(&_verify_byte_codes_fn, func);
- }
- }
- return (void*)_verify_byte_codes_fn;
+ // Load verify dll
+ char buffer[JVM_MAXPATHLEN];
+ char ebuf[1024];
+ if (!os::dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify"))
+ return NULL; // Caller will throw VerifyError
+
+ void *lib_handle = os::dll_load(buffer, ebuf, sizeof(ebuf));
+ if (lib_handle == NULL)
+ return NULL; // Caller will throw VerifyError
+
+ void *fn = os::dll_lookup(lib_handle, "VerifyClassForMajorVersion");
+ if (fn == NULL)
+ return NULL; // Caller will throw VerifyError
+
+ return _verify_byte_codes_fn = CAST_TO_FN_PTR(verify_byte_codes_fn_t, fn);
}
@@ -282,7 +292,7 @@
JavaThread* thread = (JavaThread*)THREAD;
JNIEnv *env = thread->jni_environment();
- void* verify_func = verify_byte_codes_fn();
+ verify_byte_codes_fn_t verify_func = verify_byte_codes_fn();
if (verify_func == NULL) {
jio_snprintf(message, message_len, "Could not link verifier");
@@ -301,16 +311,7 @@
// ThreadToNativeFromVM takes care of changing thread_state, so safepoint
// code knows that we have left the VM
- if (_is_new_verify_byte_codes_fn) {
- verify_byte_codes_fn_new_t func =
- CAST_TO_FN_PTR(verify_byte_codes_fn_new_t, verify_func);
- result = (*func)(env, cls, message, (int)message_len,
- klass->major_version());
- } else {
- verify_byte_codes_fn_t func =
- CAST_TO_FN_PTR(verify_byte_codes_fn_t, verify_func);
- result = (*func)(env, cls, message, (int)message_len);
- }
+ result = (*verify_func)(env, cls, message, (int)message_len, klass->major_version());
}
JNIHandles::destroy_local(cls);
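
The rewritten verify_byte_codes_fn() resolves the verifier entry point lazily with a check/lock/re-check pattern. A minimal standalone sketch of that idiom (std::mutex and std::atomic stand in for Verify_lock and the VM's volatile pointer, and a stub function stands in for the os::dll_lookup() result):

#include <atomic>
#include <mutex>

typedef bool (*verify_fn_t)(const char* class_name, int major_version);

static std::atomic<verify_fn_t> cached_fn{nullptr};
static std::mutex resolve_lock;

static bool stub_verify(const char*, int) { return true; }  // placeholder for the library symbol

static verify_fn_t lookup_verify_fn() {
  verify_fn_t fn = cached_fn.load();
  if (fn != nullptr) return fn;              // fast path: already resolved, no lock taken

  std::lock_guard<std::mutex> guard(resolve_lock);
  fn = cached_fn.load();
  if (fn != nullptr) return fn;              // another thread resolved it while we waited

  fn = &stub_verify;                         // resolve the entry point exactly once
  cached_fn.store(fn);
  return fn;
}
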
--- a/src/hotspot/share/classfile/vmSymbols.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -575,6 +575,9 @@
case vmIntrinsics::_intBitsToFloat:
case vmIntrinsics::_doubleToRawLongBits:
case vmIntrinsics::_longBitsToDouble:
+ case vmIntrinsics::_ceil:
+ case vmIntrinsics::_floor:
+ case vmIntrinsics::_rint:
case vmIntrinsics::_dabs:
case vmIntrinsics::_fabs:
case vmIntrinsics::_iabs:
--- a/src/hotspot/share/classfile/vmSymbols.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -766,6 +766,7 @@
do_name(tan_name,"tan") do_name(atan2_name,"atan2") do_name(sqrt_name,"sqrt") \
do_name(log_name,"log") do_name(log10_name,"log10") do_name(pow_name,"pow") \
do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \
+ do_name(floor_name, "floor") do_name(ceil_name, "ceil") do_name(rint_name, "rint") \
\
do_name(addExact_name,"addExact") \
do_name(decrementExact_name,"decrementExact") \
@@ -781,6 +782,9 @@
do_intrinsic(_iabs, java_lang_Math, abs_name, int_int_signature, F_S) \
do_intrinsic(_labs, java_lang_Math, abs_name, long_long_signature, F_S) \
do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \
+ do_intrinsic(_floor, java_lang_Math, floor_name, double_double_signature, F_S) \
+ do_intrinsic(_ceil, java_lang_Math, ceil_name, double_double_signature, F_S) \
+ do_intrinsic(_rint, java_lang_Math, rint_name, double_double_signature, F_S) \
do_intrinsic(_dcos, java_lang_Math, cos_name, double_double_signature, F_S) \
do_intrinsic(_dtan, java_lang_Math, tan_name, double_double_signature, F_S) \
do_intrinsic(_datan2, java_lang_Math, atan2_name, double2_double_signature, F_S) \
--- a/src/hotspot/share/code/codeCache.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/code/codeCache.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
@@ -46,7 +47,6 @@
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
--- a/src/hotspot/share/code/compiledIC.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/code/compiledIC.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -741,4 +741,22 @@
tty->cr();
}
+void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
+ NativeMovConstReg* method_holder,
+ NativeJump* jump) {
+  // A generated lambda form might be deleted from the LambdaForm
+  // cache in MethodTypeForm. If a JIT-compiled LambdaForm method
+  // becomes not entrant and the cache access returns null, the new
+  // resolve will lead to a newly generated LambdaForm.
+ Method* old_method = reinterpret_cast<Method*>(method_holder->data());
+ assert(old_method == NULL || old_method == callee() ||
+ callee->is_compiled_lambda_form() ||
+ !old_method->method_holder()->is_loader_alive() ||
+ old_method->is_old(), // may be race patching deoptimized nmethod due to redefinition.
+ "a) MT-unsafe modification of inline cache");
+
+ address destination = jump->jump_destination();
+ assert(destination == (address)-1 || destination == entry,
+ "b) MT-unsafe modification of inline cache");
+}
#endif // !PRODUCT
--- a/src/hotspot/share/code/compiledIC.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/code/compiledIC.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -402,6 +402,9 @@
// Also used by CompiledIC
void set_to_interpreted(const methodHandle& callee, address entry);
+ void verify_mt_safe(const methodHandle& callee, address entry,
+ NativeMovConstReg* method_holder,
+ NativeJump* jump) PRODUCT_RETURN;
#if INCLUDE_AOT
void set_to_far(const methodHandle& callee, address entry);
#endif
--- a/src/hotspot/share/code/nmethod.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/code/nmethod.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1293,7 +1293,6 @@
*/
bool nmethod::make_not_entrant_or_zombie(int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
- assert(!is_zombie(), "should not already be a zombie");
if (Atomic::load(&_state) >= state) {
// Avoid taking the lock if already in required state.
@@ -1316,20 +1315,18 @@
// This flag is used to remember whether we need to later lock and unregister.
bool nmethod_needs_unregister = false;
- // invalidate osr nmethod before acquiring the patching lock since
- // they both acquire leaf locks and we don't want a deadlock.
- // This logic is equivalent to the logic below for patching the
- // verified entry point of regular methods. We check that the
- // nmethod is in use to ensure that it is invalidated only once.
- if (is_osr_method() && is_in_use()) {
- // this effectively makes the osr nmethod not entrant
- invalidate_osr_method();
- }
-
{
// Enter critical section. Does not block for safepoint.
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ // This logic is equivalent to the logic below for patching the
+ // verified entry point of regular methods. We check that the
+ // nmethod is in use to ensure that it is invalidated only once.
+ if (is_osr_method() && is_in_use()) {
+ // this effectively makes the osr nmethod not entrant
+ invalidate_osr_method();
+ }
+
if (Atomic::load(&_state) >= state) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
@@ -2192,6 +2189,17 @@
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
+class VerifyMetadataClosure: public MetadataClosure {
+ public:
+ void do_metadata(Metadata* md) {
+ if (md->is_method()) {
+ Method* method = (Method*)md;
+ assert(!method->is_old(), "Should not be installing old methods");
+ }
+ }
+};
+
+
void nmethod::verify() {
// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
@@ -2255,6 +2263,10 @@
Universe::heap()->verify_nmethod(this);
verify_scopes();
+
+ CompiledICLocker nm_verify(this);
+ VerifyMetadataClosure vmc;
+ metadata_do(&vmc);
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderDataGraph.inline.hpp"
+#include "code/compiledIC.hpp"
+#include "code/nmethod.hpp"
+#include "code/scopeDesc.hpp"
+#include "compiler/compilationPolicy.hpp"
+#include "compiler/tieredThresholdPolicy.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/nativeLookup.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vframe.hpp"
+#include "runtime/vmOperations.hpp"
+#include "utilities/events.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#ifdef COMPILER1
+#include "c1/c1_Compiler.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/c2compiler.hpp"
+#endif
+
+CompilationPolicy* CompilationPolicy::_policy;
+
+// Determine compilation policy based on command line argument
+void compilationPolicy_init() {
+ #ifdef TIERED
+ if (TieredCompilation) {
+ CompilationPolicy::set_policy(new TieredThresholdPolicy());
+ } else {
+ CompilationPolicy::set_policy(new SimpleCompPolicy());
+ }
+ #else
+ CompilationPolicy::set_policy(new SimpleCompPolicy());
+ #endif
+
+ CompilationPolicy::policy()->initialize();
+}
+
+// Returns true if m must be compiled before executing it
+// This is intended to force compiles for methods (usually for
+// debugging) that would otherwise be interpreted for some reason.
+bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
+ // Don't allow Xcomp to cause compiles in replay mode
+ if (ReplayCompiles) return false;
+
+ if (m->has_compiled_code()) return false; // already compiled
+ if (!can_be_compiled(m, comp_level)) return false;
+
+ return !UseInterpreter || // must compile all methods
+ (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
+}
+
+void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
+ if (must_be_compiled(selected_method)) {
+ // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+
+ // Note: with several active threads, the must_be_compiled may be true
+ // while can_be_compiled is false; remove assert
+ // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
+ if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
+ // don't force compilation, resolve was on behalf of compiler
+ return;
+ }
+ if (selected_method->method_holder()->is_not_initialized()) {
+ // 'is_not_initialized' means not only '!is_initialized', but also that
+ // initialization has not been started yet ('!being_initialized')
+ // Do not force compilation of methods in uninitialized classes.
+ // Note that doing this would throw an assert later,
+ // in CompileBroker::compile_method.
+ // We sometimes use the link resolver to do reflective lookups
+ // even before classes are initialized.
+ return;
+ }
+ CompileBroker::compile_method(selected_method, InvocationEntryBci,
+ CompilationPolicy::policy()->initial_compile_level(),
+ methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
+ }
+}
+
+// Returns true if m is allowed to be compiled
+bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
+ // allow any levels for WhiteBox
+ assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
+
+ if (m->is_abstract()) return false;
+ if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
+
+ // Math intrinsics should never be compiled as this can lead to
+ // monotonicity problems because the interpreter will prefer the
+ // compiled code to the intrinsic version. This can't happen in
+ // production because the invocation counter can't be incremented
+ // but we shouldn't expose the system to this problem in testing
+ // modes.
+ if (!AbstractInterpreter::can_be_compiled(m)) {
+ return false;
+ }
+ if (comp_level == CompLevel_all) {
+ if (TieredCompilation) {
+ // enough to be compilable at any level for tiered
+ return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
+ } else {
+ // must be compilable at available level for non-tiered
+ return !m->is_not_compilable(CompLevel_highest_tier);
+ }
+ } else if (is_compile(comp_level)) {
+ return !m->is_not_compilable(comp_level);
+ }
+ return false;
+}
+
+// Returns true if m is allowed to be osr compiled
+bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
+ bool result = false;
+ if (comp_level == CompLevel_all) {
+ if (TieredCompilation) {
+ // enough to be osr compilable at any level for tiered
+ result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
+ } else {
+ // must be osr compilable at available level for non-tiered
+ result = !m->is_not_osr_compilable(CompLevel_highest_tier);
+ }
+ } else if (is_compile(comp_level)) {
+ result = !m->is_not_osr_compilable(comp_level);
+ }
+ return (result && can_be_compiled(m, comp_level));
+}
+
+bool CompilationPolicy::is_compilation_enabled() {
+ // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
+ return CompileBroker::should_compile_new_jobs();
+}
+
+CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
+ // Remove unloaded methods from the queue
+ for (CompileTask* task = compile_queue->first(); task != NULL; ) {
+ CompileTask* next = task->next();
+ if (task->is_unloaded()) {
+ compile_queue->remove_and_mark_stale(task);
+ }
+ task = next;
+ }
+#if INCLUDE_JVMCI
+ if (UseJVMCICompiler && !BackgroundCompilation) {
+ /*
+ * In blocking compilation mode, the CompileBroker will make
+ * compilations submitted by a JVMCI compiler thread non-blocking. These
+ * compilations should be scheduled after all blocking compilations
+ * to service non-compiler related compilations sooner and reduce the
+ * chance of such compilations timing out.
+ */
+ for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
+ if (task->is_blocking()) {
+ return task;
+ }
+ }
+ }
+#endif
+ return compile_queue->first();
+}
+
+#ifndef PRODUCT
+void SimpleCompPolicy::trace_osr_completion(nmethod* osr_nm) {
+ if (TraceOnStackReplacement) {
+ if (osr_nm == NULL) tty->print_cr("compilation failed");
+ else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
+ }
+}
+#endif // !PRODUCT
+
+void SimpleCompPolicy::initialize() {
+ // Setup the compiler thread numbers
+ if (CICompilerCountPerCPU) {
+ // Example: if CICompilerCountPerCPU is true, then we get
+ // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
+ // May help big-app startup time.
+ _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1);
+ // Make sure there is enough space in the code cache to hold all the compiler buffers
+ size_t buffer_size = 1;
+#ifdef COMPILER1
+ buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size;
+#endif
+#ifdef COMPILER2
+ buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size;
+#endif
+ int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
+ if (_compiler_count > max_count) {
+ // Lower the compiler count such that all buffers fit into the code cache
+ _compiler_count = MAX2(max_count, 1);
+ }
+ FLAG_SET_ERGO(CICompilerCount, _compiler_count);
+ } else {
+ _compiler_count = CICompilerCount;
+ }
+}
+
+// Note: this policy is used ONLY if TieredCompilation is off.
+// compiler_count() behaves the following way:
+// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
+// zero for the c1 compilation levels in server compilation mode runs
+// and c2 compilation levels in client compilation mode runs.
+// - with COMPILER2 not defined it should return zero for c2 compilation levels.
+// - with COMPILER1 not defined it should return zero for c1 compilation levels.
+// - if neither is defined - always return zero.
+int SimpleCompPolicy::compiler_count(CompLevel comp_level) {
+ assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
+ if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
+ is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
+ return _compiler_count;
+ }
+ return 0;
+}
+
+void SimpleCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
+  // Make sure invocation and backedge counters don't overflow again right away
+  // as would be the case for native methods.
+
+ // BUT also make sure the method doesn't look like it was never executed.
+ // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
+ MethodCounters* mcs = m->method_counters();
+ assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
+ mcs->invocation_counter()->set_carry();
+ mcs->backedge_counter()->set_carry();
+
+ assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
+}
+
+void SimpleCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
+ // Delay next back-branch event but pump up invocation counter to trigger
+ // whole method compilation.
+ MethodCounters* mcs = m->method_counters();
+ assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
+ InvocationCounter* i = mcs->invocation_counter();
+ InvocationCounter* b = mcs->backedge_counter();
+
+  // Don't set invocation_counter's value too low, otherwise the method will
+  // look immature (ic < ~5300), which prevents inlining based on type
+  // profiling.
+ i->set(i->state(), CompileThreshold);
+ // Don't reset counter too low - it is used to check if OSR method is ready.
+ b->set(b->state(), CompileThreshold / 2);
+}
+
+//
+// CounterDecay
+//
+// Iterates through invocation counters and decrements them. This
+// is done at each safepoint.
+//
+class CounterDecay : public AllStatic {
+ static jlong _last_timestamp;
+ static void do_method(Method* m) {
+ MethodCounters* mcs = m->method_counters();
+ if (mcs != NULL) {
+ mcs->invocation_counter()->decay();
+ }
+ }
+public:
+ static void decay();
+ static bool is_decay_needed() {
+ return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
+ }
+};
+
+jlong CounterDecay::_last_timestamp = 0;
+
+void CounterDecay::decay() {
+ _last_timestamp = os::javaTimeMillis();
+
+  // This operation is performed only at the end of a safepoint, so no GC is
+  // in progress and all Java mutators are suspended; hence the
+  // SystemDictionary_lock is not needed either.
+ assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
+ size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
+ size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+ CounterHalfLifeTime);
+ for (size_t i = 0; i < classes_per_tick; i++) {
+ InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
+ if (k != NULL) {
+ k->methods_do(do_method);
+ }
+ }
+}
+
+// Called at the end of the safepoint
+void SimpleCompPolicy::do_safepoint_work() {
+  if (UseCounterDecay && CounterDecay::is_decay_needed()) {
+ CounterDecay::decay();
+ }
+}
+
+void SimpleCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
+ ScopeDesc* sd = trap_scope;
+ MethodCounters* mcs;
+ InvocationCounter* c;
+ for (; !sd->is_top(); sd = sd->sender()) {
+ mcs = sd->method()->method_counters();
+ if (mcs != NULL) {
+      // Reset invocation counters of inlined methods, since they can also trigger compilations.
+ mcs->invocation_counter()->reset();
+ }
+ }
+ mcs = sd->method()->method_counters();
+ if (mcs != NULL) {
+ c = mcs->invocation_counter();
+ if (is_osr) {
+ // It was an OSR method, so bump the count higher.
+ c->set(c->state(), CompileThreshold);
+ } else {
+ c->reset();
+ }
+ mcs->backedge_counter()->reset();
+ }
+}
+
+// This method can be called by any component of the runtime to notify the policy
+// that it's recommended to delay the compilation of this method.
+void SimpleCompPolicy::delay_compilation(Method* method) {
+ MethodCounters* mcs = method->method_counters();
+ if (mcs != NULL) {
+ mcs->invocation_counter()->decay();
+ mcs->backedge_counter()->decay();
+ }
+}
+
+void SimpleCompPolicy::disable_compilation(Method* method) {
+ MethodCounters* mcs = method->method_counters();
+ if (mcs != NULL) {
+ mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
+ mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
+ }
+}
+
+CompileTask* SimpleCompPolicy::select_task(CompileQueue* compile_queue) {
+ return select_task_helper(compile_queue);
+}
+
+bool SimpleCompPolicy::is_mature(Method* method) {
+ MethodData* mdo = method->method_data();
+ assert(mdo != NULL, "Should be");
+ uint current = mdo->mileage_of(method);
+ uint initial = mdo->creation_mileage();
+ if (current < initial)
+ return true; // some sort of overflow
+ uint target;
+ if (ProfileMaturityPercentage <= 0)
+ target = (uint) -ProfileMaturityPercentage; // absolute value
+ else
+ target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
+ return (current >= initial + target);
+}
+
+nmethod* SimpleCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
+ int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
+ assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
+ NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
+ if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
+ // If certain JVMTI events (e.g. frame pop event) are requested then the
+ // thread is forced to remain in interpreted code. This is
+ // implemented partly by a check in the run_compiled_code
+ // section of the interpreter whether we should skip running
+ // compiled code, and partly by skipping OSR compiles for
+ // interpreted-only threads.
+ if (bci != InvocationEntryBci) {
+ reset_counter_for_back_branch_event(method);
+ return NULL;
+ }
+ }
+ if (ReplayCompiles) {
+ // Don't trigger other compiles in testing mode
+ if (bci == InvocationEntryBci) {
+ reset_counter_for_invocation_event(method);
+ } else {
+ reset_counter_for_back_branch_event(method);
+ }
+ return NULL;
+ }
+
+ if (bci == InvocationEntryBci) {
+    // When the code cache is full, compilation is switched off and UseCompiler
+    // is set to false.
+ if (!method->has_compiled_code() && UseCompiler) {
+ method_invocation_event(method, thread);
+ } else {
+ // Force counter overflow on method entry, even if no compilation
+ // happened. (The method_invocation_event call does this also.)
+ reset_counter_for_invocation_event(method);
+ }
+    // Compilation at an invocation overflow no longer retries the test for a
+    // compiled method; we always run the loser of the race as interpreted,
+    // so return NULL.
+ return NULL;
+ } else {
+ // counter overflow in a loop => try to do on-stack-replacement
+ nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
+ NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
+ // when code cache is full, we should not compile any more...
+ if (osr_nm == NULL && UseCompiler) {
+ method_back_branch_event(method, bci, thread);
+ osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
+ }
+ if (osr_nm == NULL) {
+ reset_counter_for_back_branch_event(method);
+ return NULL;
+ }
+ return osr_nm;
+ }
+ return NULL;
+}
+
+#ifndef PRODUCT
+void SimpleCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
+ if (TraceInvocationCounterOverflow) {
+ MethodCounters* mcs = m->method_counters();
+ assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
+ InvocationCounter* ic = mcs->invocation_counter();
+ InvocationCounter* bc = mcs->backedge_counter();
+ ResourceMark rm;
+ if (bci == InvocationEntryBci) {
+ tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
+ } else {
+ tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
+ }
+ m->print_value();
+ tty->cr();
+ ic->print();
+ bc->print();
+ if (ProfileInterpreter) {
+ if (bci != InvocationEntryBci) {
+ MethodData* mdo = m->method_data();
+ if (mdo != NULL) {
+ ProfileData *pd = mdo->bci_to_data(branch_bci);
+ if (pd == NULL) {
+ tty->print_cr("back branch count = N/A (missing ProfileData)");
+ } else {
+ tty->print_cr("back branch count = %d", pd->as_JumpData()->taken());
+ }
+ }
+ }
+ }
+ }
+}
+
+void SimpleCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
+ if (TraceOnStackReplacement) {
+ ResourceMark rm;
+ tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
+ method->print_short_name(tty);
+ tty->print_cr(" at bci %d", bci);
+ }
+}
+#endif // !PRODUCT
+
+void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
+ const int comp_level = CompLevel_highest_tier;
+ const int hot_count = m->invocation_count();
+ reset_counter_for_invocation_event(m);
+
+ if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
+ CompiledMethod* nm = m->code();
+ if (nm == NULL ) {
+ CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
+ }
+ }
+}
+
+void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
+ const int comp_level = CompLevel_highest_tier;
+ const int hot_count = m->backedge_count();
+
+ if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
+ CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
+ NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
+ }
+}
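
As a worked example of the pacing arithmetic in CounterDecay::decay() above (the flag values below are illustrative assumptions, not the defaults of any particular build):

#include <cstdio>

int main() {
  double nclasses                      = 20000.0;  // loaded instance classes (example)
  double CounterDecayMinIntervalLength = 500.0;    // ms between decay passes (assumed)
  double CounterHalfLifeTime           = 30.0;     // seconds (assumed)

  // Same formula as CounterDecay::decay(): visit enough classes per pass that
  // each class is decayed roughly once per half-life.
  double classes_per_tick =
      nclasses * (CounterDecayMinIntervalLength * 1e-3 / CounterHalfLifeTime);

  std::printf("classes decayed per pass: %.0f\n", classes_per_tick);  // ~333
  return 0;
}
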
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/compiler/compilationPolicy.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_COMPILER_COMPILATIONPOLICY_HPP
+#define SHARE_COMPILER_COMPILATIONPOLICY_HPP
+
+#include "code/nmethod.hpp"
+#include "compiler/compileBroker.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/vmOperations.hpp"
+#include "utilities/growableArray.hpp"
+
+// The CompilationPolicy selects which method (if any) should be compiled.
+// It also decides which methods must always be compiled (i.e., are never
+// interpreted).
+class CompileTask;
+class CompileQueue;
+
+class CompilationPolicy : public CHeapObj<mtCompiler> {
+ static CompilationPolicy* _policy;
+
+ // m must be compiled before executing it
+ static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+
+public:
+ // If m must_be_compiled then request a compilation from the CompileBroker.
+ // This supports the -Xcomp option.
+ static void compile_if_required(const methodHandle& m, TRAPS);
+
+ // m is allowed to be compiled
+ static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+ // m is allowed to be osr compiled
+ static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+ static bool is_compilation_enabled();
+ static void set_policy(CompilationPolicy* policy) { _policy = policy; }
+ static CompilationPolicy* policy() { return _policy; }
+
+ static CompileTask* select_task_helper(CompileQueue* compile_queue);
+
+ // Return initial compile level that is used with Xcomp
+ virtual CompLevel initial_compile_level() = 0;
+ virtual int compiler_count(CompLevel comp_level) = 0;
+  // Main notification entry; returns a pointer to an nmethod if OSR is required,
+  // or NULL otherwise.
+ virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) = 0;
+  // do_safepoint_work() is called at the end of the safepoint
+ virtual void do_safepoint_work() = 0;
+ // reprofile request
+ virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
+ // delay_compilation(method) can be called by any component of the runtime to notify the policy
+ // that it's recommended to delay the compilation of this method.
+ virtual void delay_compilation(Method* method) = 0;
+ // disable_compilation() is called whenever the runtime decides to disable compilation of the
+ // specified method.
+ virtual void disable_compilation(Method* method) = 0;
+ // Select task is called by CompileBroker. The queue is guaranteed to have at least one
+ // element and is locked. The function should select one and return it.
+ virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
+ // Tell the runtime if we think a given method is adequately profiled.
+ virtual bool is_mature(Method* method) = 0;
+ // Do policy initialization
+ virtual void initialize() = 0;
+ virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
+};
+
+// A simple compilation policy.
+class SimpleCompPolicy : public CompilationPolicy {
+ int _compiler_count;
+ private:
+ static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci);
+ static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci);
+ static void trace_osr_completion(nmethod* osr_nm);
+ void reset_counter_for_invocation_event(const methodHandle& method);
+ void reset_counter_for_back_branch_event(const methodHandle& method);
+ void method_invocation_event(const methodHandle& m, JavaThread* thread);
+ void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
+ public:
+ SimpleCompPolicy() : _compiler_count(0) { }
+ virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
+ virtual int compiler_count(CompLevel comp_level);
+ virtual void do_safepoint_work();
+ virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
+ virtual void delay_compilation(Method* method);
+ virtual void disable_compilation(Method* method);
+ virtual bool is_mature(Method* method);
+ virtual void initialize();
+ virtual CompileTask* select_task(CompileQueue* compile_queue);
+ virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
+};
+
+
+#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP
--- a/src/hotspot/share/compiler/compileBroker.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -30,6 +30,7 @@
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@@ -48,7 +49,6 @@
#include "prims/whitebox.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -188,14 +188,6 @@
#endif // TIERED
void CompilerConfig::set_tiered_flags() {
- // With tiered, set default policy to SimpleThresholdPolicy, which is 2.
- if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
- FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
- }
- if (CompilationPolicyChoice < 2) {
- vm_exit_during_initialization(
- "Incompatible compilation policy selected", NULL);
- }
// Increase the code cache size - tiered compiles a lot more.
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_ERGO(ReservedCodeCacheSize,
@@ -420,17 +412,6 @@
if (TieredCompilation) {
set_tiered_flags();
} else {
- int max_compilation_policy_choice = 1;
-#ifdef COMPILER2
- if (is_server_compilation_mode_vm()) {
- max_compilation_policy_choice = 2;
- }
-#endif
- // Check if the policy is valid.
- if (CompilationPolicyChoice >= max_compilation_policy_choice) {
- vm_exit_during_initialization(
- "Incompatible compilation policy selected", NULL);
- }
// Scale CompileThreshold
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
--- a/src/hotspot/share/compiler/compilerDirectives.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -66,8 +66,7 @@
cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
- cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
-ZGC_ONLY(cflags(ZTraceLoadBarriers, bool, false, ZTraceLoadBarriers))
+ cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
#else
#define compilerdirectives_c2_flags(cflags)
#endif
--- a/src/hotspot/share/compiler/oopMap.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/compiler/oopMap.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -48,29 +48,25 @@
// OopMapStream
-OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
+OopMapStream::OopMapStream(OopMap* oop_map) {
_stream = new CompressedReadStream(oop_map->write_stream()->buffer());
- _mask = oop_types_mask;
_size = oop_map->omv_count();
_position = 0;
_valid_omv = false;
}
-OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
+OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) {
_stream = new CompressedReadStream(oop_map->data_addr());
- _mask = oop_types_mask;
_size = oop_map->count();
_position = 0;
_valid_omv = false;
}
void OopMapStream::find_next() {
- while(_position++ < _size) {
+ if (_position++ < _size) {
_omv.read_from(_stream);
- if(((int)_omv.type() & _mask) > 0) {
- _valid_omv = true;
- return;
- }
+ _valid_omv = true;
+ return;
}
_valid_omv = false;
}
@@ -140,16 +136,7 @@
assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
debug_only( _locs_used[reg->value()] = x; )
- OopMapValue o(reg, x);
-
- if(x == OopMapValue::callee_saved_value) {
- // This can never be a stack location, so we don't need to transform it.
- assert(optional->is_reg(), "Trying to callee save a stack location");
- o.set_content_reg(optional);
- } else if(x == OopMapValue::derived_oop_value) {
- o.set_content_reg(optional);
- }
-
+ OopMapValue o(reg, x, optional);
o.write_on(write_stream());
increment_count();
}
@@ -160,11 +147,6 @@
}
-void OopMap::set_value(VMReg reg) {
- // At this time, we don't need value entries in our OopMap.
-}
-
-
void OopMap::set_narrowoop(VMReg reg) {
set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
@@ -328,7 +310,7 @@
// changed before derived pointer offset has been collected)
OopMapValue omv;
{
- OopMapStream oms(map,OopMapValue::derived_oop_value);
+ OopMapStream oms(map);
if (!oms.is_done()) {
#ifndef TIERED
COMPILER1_PRESENT(ShouldNotReachHere();)
@@ -340,27 +322,28 @@
#endif // !TIERED
do {
omv = oms.current();
- oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
- guarantee(loc != NULL, "missing saved register");
- oop *derived_loc = loc;
- oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
- // Ignore NULL oops and decoded NULL narrow oops which
- // equal to CompressedOops::base() when a narrow oop
- // implicit null check is used in compiled code.
- // The narrow_oop_base could be NULL or be the address
- // of the page below heap depending on compressed oops mode.
- if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
- derived_oop_fn(base_loc, derived_loc);
+ if (omv.type() == OopMapValue::derived_oop_value) {
+ oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
+ guarantee(loc != NULL, "missing saved register");
+ oop *derived_loc = loc;
+ oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
+ // Ignore NULL oops and decoded NULL narrow oops which
+ // equal to CompressedOops::base() when a narrow oop
+ // implicit null check is used in compiled code.
+ // The narrow_oop_base could be NULL or be the address
+ // of the page below heap depending on compressed oops mode.
+ if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
+ derived_oop_fn(base_loc, derived_loc);
+ }
}
oms.next();
} while (!oms.is_done());
}
}
- // We want coop and oop oop_types
- int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;
{
- for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
+ // We want coop and oop oop_types
+ for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
omv = oms.current();
oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
// It should be an error if no location can be found for a
@@ -436,12 +419,14 @@
assert(map != NULL, "no ptr map found");
DEBUG_ONLY(int nof_callee = 0;)
- for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
+ for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
OopMapValue omv = oms.current();
- VMReg reg = omv.content_reg();
- oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
- reg_map->set_location(reg, (address) loc);
- DEBUG_ONLY(nof_callee++;)
+ if (omv.type() == OopMapValue::callee_saved_value) {
+ VMReg reg = omv.content_reg();
+ oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
+ reg_map->set_location(reg, (address) loc);
+ DEBUG_ONLY(nof_callee++;)
+ }
}
// Check that runtime stubs save all callee-saved registers
@@ -452,25 +437,6 @@
#endif // COMPILER2
}
-//=============================================================================
-// Non-Product code
-
-#ifndef PRODUCT
-
-bool ImmutableOopMap::has_derived_pointer() const {
-#if !defined(TIERED) && !INCLUDE_JVMCI
- COMPILER1_PRESENT(return false);
-#endif // !TIERED
-#if COMPILER2_OR_JVMCI
- OopMapStream oms(this,OopMapValue::derived_oop_value);
- return oms.is_done();
-#else
- return false;
-#endif // COMPILER2_OR_JVMCI
-}
-
-#endif //PRODUCT
-
// Printing code is present in product build for -XX:+PrintAssembly.
static
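
Taken together, the oopMap.cpp hunks above replace mask-filtered OopMapStream construction with plain iteration plus an explicit type check in each loop body. The following standalone C++ sketch only illustrates that pattern; the Stream and Entry types are made-up stand-ins, not the HotSpot classes.

    #include <cstdio>
    #include <vector>

    enum oop_types { oop_value, narrowoop_value, callee_saved_value, derived_oop_value };

    struct Entry {          // stand-in for OopMapValue
      int reg;
      oop_types type;
    };

    struct Stream {         // stand-in for OopMapStream: no type mask any more
      const std::vector<Entry>& entries;
      size_t pos;
      explicit Stream(const std::vector<Entry>& e) : entries(e), pos(0) {}
      bool  is_done() const { return pos >= entries.size(); }
      void  next()          { ++pos; }
      Entry current() const { return entries[pos]; }
    };

    int main() {
      std::vector<Entry> map = { {1, oop_value}, {2, callee_saved_value}, {3, derived_oop_value} };

      // Old callers passed a type mask to the stream constructor; new callers
      // iterate every entry and filter on the type in the loop body, as in the
      // hunks above for derived and callee-saved entries.
      for (Stream oms(map); !oms.is_done(); oms.next()) {
        Entry omv = oms.current();
        if (omv.type == callee_saved_value) {
          std::printf("callee-saved entry at reg %d\n", omv.reg);
        }
      }
      return 0;
    }
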
--- a/src/hotspot/share/compiler/oopMap.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/compiler/oopMap.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -53,7 +53,7 @@
public:
// Constants
- enum { type_bits = 4,
+ enum { type_bits = 2,
register_bits = BitsPerShort - type_bits };
enum { type_shift = 0,
@@ -64,19 +64,41 @@
register_mask = right_n_bits(register_bits),
register_mask_in_place = register_mask << register_shift };
- enum oop_types { // must fit in type_bits
- unused_value =0, // powers of 2, for masking OopMapStream
- oop_value = 1,
- narrowoop_value = 2,
- callee_saved_value = 4,
- derived_oop_value= 8 };
+ enum oop_types {
+ oop_value,
+ narrowoop_value,
+ callee_saved_value,
+ derived_oop_value,
+ unused_value = -1 // Only used as a sentinel value
+ };
// Constructors
OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }
- OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg, t); set_content_reg(VMRegImpl::Bad()); }
- OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg, t); set_content_reg(reg2); }
- OopMapValue (CompressedReadStream* stream) { read_from(stream); }
+ OopMapValue (VMReg reg, oop_types t, VMReg reg2) {
+ set_reg_type(reg, t);
+ set_content_reg(reg2);
+ }
+
+ private:
+ void set_reg_type(VMReg p, oop_types t) {
+ set_value((p->value() << register_shift) | t);
+ assert(reg() == p, "sanity check" );
+ assert(type() == t, "sanity check" );
+ }
+ void set_content_reg(VMReg r) {
+ if (is_callee_saved()) {
+ // This can never be a stack location, so we don't need to transform it.
+ assert(r->is_reg(), "Trying to callee save a stack location");
+ } else if (is_derived_oop()) {
+ assert (r->is_valid(), "must have a valid VMReg");
+ } else {
+ assert (!r->is_valid(), "valid VMReg not allowed");
+ }
+ _content_reg = r->value();
+ }
+
+ public:
// Archiving
void write_on(CompressedWriteStream* stream) {
stream->write_int(value());
@@ -94,15 +116,10 @@
// Querying
bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; }
- bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
+ bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
- void set_oop() { set_value((value() & register_mask_in_place) | oop_value); }
- void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); }
- void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
- void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); }
-
VMReg reg() const { return VMRegImpl::as_VMReg(mask_bits(value(), register_mask_in_place) >> register_shift); }
oop_types type() const { return (oop_types)mask_bits(value(), type_mask_in_place); }
@@ -110,15 +127,7 @@
return (p->value() == (p->value() & register_mask));
}
- void set_reg_type(VMReg p, oop_types t) {
- set_value((p->value() << register_shift) | t);
- assert(reg() == p, "sanity check" );
- assert(type() == t, "sanity check" );
- }
-
-
VMReg content_reg() const { return VMRegImpl::as_VMReg(_content_reg, true); }
- void set_content_reg(VMReg r) { _content_reg = r->value(); }
// Physical location queries
bool is_register_loc() { return reg()->is_reg(); }
@@ -156,6 +165,8 @@
enum DeepCopyToken { _deep_copy_token };
OopMap(DeepCopyToken, OopMap* source); // used only by deep_copy
+ void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional);
+
public:
OopMap(int frame_size, int arg_count);
@@ -173,19 +184,14 @@
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void set_oop ( VMReg local);
- void set_value( VMReg local);
void set_narrowoop(VMReg local);
- void set_dead ( VMReg local);
void set_callee_saved( VMReg local, VMReg caller_machine_register );
void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
- void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional);
int heap_size() const;
void copy_data_to(address addr) const;
OopMap* deep_copy();
- bool has_derived_pointer() const PRODUCT_RETURN0;
-
bool legal_vm_reg_name(VMReg local) {
return OopMapValue::legal_vm_reg_name(local);
}
@@ -269,7 +275,6 @@
public:
ImmutableOopMap(const OopMap* oopmap);
- bool has_derived_pointer() const PRODUCT_RETURN0;
int count() const { return _count; }
#ifdef ASSERT
int nr_of_bytes() const; // this is an expensive operation, only used in debug builds
@@ -334,7 +339,6 @@
class OopMapStream : public StackObj {
private:
CompressedReadStream* _stream;
- int _mask;
int _size;
int _position;
bool _valid_omv;
@@ -342,8 +346,8 @@
void find_next();
public:
- OopMapStream(OopMap* oop_map, int oop_types_mask = OopMapValue::type_mask_in_place);
- OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask = OopMapValue::type_mask_in_place);
+ OopMapStream(OopMap* oop_map);
+ OopMapStream(const ImmutableOopMap* oop_map);
bool is_done() { if(!_valid_omv) { find_next(); } return !_valid_omv; }
void next() { find_next(); }
OopMapValue current() { return _omv; }
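
For reference, a tiny standalone C++ sketch of the narrowed encoding introduced in the oopMap.hpp hunks above: with the oop_types values now sequential (0..3) rather than powers of two, two type bits suffice and the remaining bits of the 16-bit value hold the register index. All names below (pack, reg_of, type_of) are illustrative only, not HotSpot API.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    enum oop_types { oop_value, narrowoop_value, callee_saved_value, derived_oop_value };

    const int      type_bits      = 2;                 // was 4 with power-of-two values
    const int      register_bits  = 16 - type_bits;    // BitsPerShort - type_bits
    const int      register_shift = type_bits;
    const uint16_t type_mask      = (1u << type_bits) - 1;
    const uint16_t register_mask  = (1u << register_bits) - 1;

    // Pack a register index and a type tag into one 16-bit value, in the spirit
    // of OopMapValue::set_reg_type().
    uint16_t pack(uint16_t reg, oop_types t) {
      assert(reg <= register_mask && "register index must fit in the remaining bits");
      return (uint16_t)((reg << register_shift) | t);
    }

    unsigned  reg_of(uint16_t v)  { return (v >> register_shift) & register_mask; }
    oop_types type_of(uint16_t v) { return (oop_types)(v & type_mask); }

    int main() {
      uint16_t v = pack(123, derived_oop_value);
      std::printf("reg=%u type=%d\n", reg_of(v), (int)type_of(v));   // reg=123 type=3
      assert(reg_of(v) == 123 && type_of(v) == derived_oop_value);
      return 0;
    }
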
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/compiler/tieredThresholdPolicy.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/compileBroker.hpp"
+#include "compiler/compilerOracle.hpp"
+#include "compiler/tieredThresholdPolicy.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "code/scopeDesc.hpp"
+#include "oops/method.inline.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmci.hpp"
+#endif
+
+#ifdef TIERED
+
+#include "c1/c1_Compiler.hpp"
+#include "opto/c2compiler.hpp"
+
+template<CompLevel level>
+bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
+ double threshold_scaling;
+ if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+ scale *= threshold_scaling;
+ }
+ switch(level) {
+ case CompLevel_aot:
+ return (i >= Tier3AOTInvocationThreshold * scale) ||
+ (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
+ case CompLevel_none:
+ case CompLevel_limited_profile:
+ return (i >= Tier3InvocationThreshold * scale) ||
+ (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
+ case CompLevel_full_profile:
+ return (i >= Tier4InvocationThreshold * scale) ||
+ (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
+ }
+ return true;
+}
+
+template<CompLevel level>
+bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
+ double threshold_scaling;
+ if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+ scale *= threshold_scaling;
+ }
+ switch(level) {
+ case CompLevel_aot:
+ return b >= Tier3AOTBackEdgeThreshold * scale;
+ case CompLevel_none:
+ case CompLevel_limited_profile:
+ return b >= Tier3BackEdgeThreshold * scale;
+ case CompLevel_full_profile:
+ return b >= Tier4BackEdgeThreshold * scale;
+ }
+ return true;
+}
+
+// Simple methods are as good compiled with C1 as with C2.
+// Determine if a given method is such a case.
+bool TieredThresholdPolicy::is_trivial(Method* method) {
+ if (method->is_accessor() ||
+ method->is_constant_getter()) {
+ return true;
+ }
+ return false;
+}
+
+bool TieredThresholdPolicy::should_compile_at_level_simple(Method* method) {
+ if (TieredThresholdPolicy::is_trivial(method)) {
+ return true;
+ }
+#if INCLUDE_JVMCI
+ if (UseJVMCICompiler) {
+ AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
+ if (comp != NULL && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
+ return true;
+ }
+ }
+#endif
+ return false;
+}
+
+CompLevel TieredThresholdPolicy::comp_level(Method* method) {
+ CompiledMethod *nm = method->code();
+ if (nm != NULL && nm->is_in_use()) {
+ return (CompLevel)nm->comp_level();
+ }
+ return CompLevel_none;
+}
+
+void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) {
+ int invocation_count = mh->invocation_count();
+ int backedge_count = mh->backedge_count();
+ MethodData* mdh = mh->method_data();
+ int mdo_invocations = 0, mdo_backedges = 0;
+ int mdo_invocations_start = 0, mdo_backedges_start = 0;
+ if (mdh != NULL) {
+ mdo_invocations = mdh->invocation_count();
+ mdo_backedges = mdh->backedge_count();
+ mdo_invocations_start = mdh->invocation_count_start();
+ mdo_backedges_start = mdh->backedge_count_start();
+ }
+ tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
+ invocation_count, backedge_count, prefix,
+ mdo_invocations, mdo_invocations_start,
+ mdo_backedges, mdo_backedges_start);
+ tty->print(" %smax levels=%d,%d", prefix,
+ mh->highest_comp_level(), mh->highest_osr_comp_level());
+}
+
+// Print an event.
+void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh,
+ int bci, CompLevel level) {
+ bool inlinee_event = mh() != imh();
+
+ ttyLocker tty_lock;
+ tty->print("%lf: [", os::elapsedTime());
+
+ switch(type) {
+ case CALL:
+ tty->print("call");
+ break;
+ case LOOP:
+ tty->print("loop");
+ break;
+ case COMPILE:
+ tty->print("compile");
+ break;
+ case REMOVE_FROM_QUEUE:
+ tty->print("remove-from-queue");
+ break;
+ case UPDATE_IN_QUEUE:
+ tty->print("update-in-queue");
+ break;
+ case REPROFILE:
+ tty->print("reprofile");
+ break;
+ case MAKE_NOT_ENTRANT:
+ tty->print("make-not-entrant");
+ break;
+ default:
+ tty->print("unknown");
+ }
+
+ tty->print(" level=%d ", level);
+
+ ResourceMark rm;
+ char *method_name = mh->name_and_sig_as_C_string();
+ tty->print("[%s", method_name);
+ if (inlinee_event) {
+ char *inlinee_name = imh->name_and_sig_as_C_string();
+ tty->print(" [%s]] ", inlinee_name);
+ }
+ else tty->print("] ");
+ tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
+ CompileBroker::queue_size(CompLevel_full_optimization));
+
+ print_specific(type, mh, imh, bci, level);
+
+ if (type != COMPILE) {
+ print_counters("", mh);
+ if (inlinee_event) {
+ print_counters("inlinee ", imh);
+ }
+ tty->print(" compilable=");
+ bool need_comma = false;
+ if (!mh->is_not_compilable(CompLevel_full_profile)) {
+ tty->print("c1");
+ need_comma = true;
+ }
+ if (!mh->is_not_osr_compilable(CompLevel_full_profile)) {
+ if (need_comma) tty->print(",");
+ tty->print("c1-osr");
+ need_comma = true;
+ }
+ if (!mh->is_not_compilable(CompLevel_full_optimization)) {
+ if (need_comma) tty->print(",");
+ tty->print("c2");
+ need_comma = true;
+ }
+ if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) {
+ if (need_comma) tty->print(",");
+ tty->print("c2-osr");
+ }
+ tty->print(" status=");
+ if (mh->queued_for_compilation()) {
+ tty->print("in-queue");
+ } else tty->print("idle");
+ }
+ tty->print_cr("]");
+}
+
+void TieredThresholdPolicy::initialize() {
+ int count = CICompilerCount;
+ bool c1_only = TieredStopAtLevel < CompLevel_full_optimization;
+#ifdef _LP64
+ // Turn on ergonomic compiler count selection
+ if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
+ FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
+ }
+ if (CICompilerCountPerCPU) {
+ // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
+ int log_cpu = log2_int(os::active_processor_count());
+ int loglog_cpu = log2_int(MAX2(log_cpu, 1));
+ count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
+ // Make sure there is enough space in the code cache to hold all the compiler buffers
+ size_t c1_size = Compiler::code_buffer_size();
+ size_t c2_size = C2Compiler::initial_code_buffer_size();
+ size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
+ int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
+ if (count > max_count) {
+ // Lower the compiler count such that all buffers fit into the code cache
+ count = MAX2(max_count, c1_only ? 1 : 2);
+ }
+ FLAG_SET_ERGO(CICompilerCount, count);
+ }
+#else
+ // On 32-bit systems, the number of compiler threads is limited to 3.
+ // On these systems, the virtual address space available to the JVM
+ // is usually limited to 2-4 GB (the exact value depends on the platform).
+ // As the compilers (especially C2) can consume a large amount of
+ // memory, scaling the number of compiler threads with the number of
+ // available cores can result in the exhaustion of the address space
+ // available to the VM and thus cause the VM to crash.
+ if (FLAG_IS_DEFAULT(CICompilerCount)) {
+ count = 3;
+ FLAG_SET_ERGO(CICompilerCount, count);
+ }
+#endif
+
+ if (c1_only) {
+ // No C2 compiler thread required
+ set_c1_count(count);
+ } else {
+ set_c1_count(MAX2(count / 3, 1));
+ set_c2_count(MAX2(count - c1_count(), 1));
+ }
+ assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
+
+ // Some inlining tuning
+#ifdef X86
+ if (FLAG_IS_DEFAULT(InlineSmallCode)) {
+ FLAG_SET_DEFAULT(InlineSmallCode, 2000);
+ }
+#endif
+
+#if defined SPARC || defined AARCH64
+ if (FLAG_IS_DEFAULT(InlineSmallCode)) {
+ FLAG_SET_DEFAULT(InlineSmallCode, 2500);
+ }
+#endif
+
+ set_increase_threshold_at_ratio();
+ set_start_time(os::javaTimeMillis());
+}
+
+void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
+ if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
+ counter->set_carry_flag();
+ }
+}
+
+// Set carry flags on the counters if necessary
+void TieredThresholdPolicy::handle_counter_overflow(Method* method) {
+ MethodCounters *mcs = method->method_counters();
+ if (mcs != NULL) {
+ set_carry_if_necessary(mcs->invocation_counter());
+ set_carry_if_necessary(mcs->backedge_counter());
+ }
+ MethodData* mdo = method->method_data();
+ if (mdo != NULL) {
+ set_carry_if_necessary(mdo->invocation_counter());
+ set_carry_if_necessary(mdo->backedge_counter());
+ }
+}
+
+// Called with the queue locked and with at least one element
+CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) {
+ CompileTask *max_blocking_task = NULL;
+ CompileTask *max_task = NULL;
+ Method* max_method = NULL;
+ jlong t = os::javaTimeMillis();
+ // Iterate through the queue and find a method with a maximum rate.
+ for (CompileTask* task = compile_queue->first(); task != NULL;) {
+ CompileTask* next_task = task->next();
+ Method* method = task->method();
+ // If a method was unloaded or has been stale for some time, remove it from the queue.
+ // Blocking tasks and tasks submitted from whitebox API don't become stale
+ if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) {
+ if (!task->is_unloaded()) {
+ if (PrintTieredEvents) {
+ print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
+ }
+ method->clear_queued_for_compilation();
+ }
+ compile_queue->remove_and_mark_stale(task);
+ task = next_task;
+ continue;
+ }
+ update_rate(t, method);
+ if (max_task == NULL || compare_methods(method, max_method)) {
+ // Select a method with the highest rate
+ max_task = task;
+ max_method = method;
+ }
+
+ if (task->is_blocking()) {
+ if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
+ max_blocking_task = task;
+ }
+ }
+
+ task = next_task;
+ }
+
+ if (max_blocking_task != NULL) {
+ // In blocking compilation mode, the CompileBroker will make
+ // compilations submitted by a JVMCI compiler thread non-blocking. These
+ // compilations should be scheduled after all blocking compilations
+ // to service non-compiler related compilations sooner and reduce the
+ // chance of such compilations timing out.
+ max_task = max_blocking_task;
+ max_method = max_task->method();
+ }
+
+ if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
+ TieredStopAtLevel > CompLevel_full_profile &&
+ max_method != NULL && is_method_profiled(max_method)) {
+ max_task->set_comp_level(CompLevel_limited_profile);
+
+ if (CompileBroker::compilation_is_complete(max_method, max_task->osr_bci(), CompLevel_limited_profile)) {
+ if (PrintTieredEvents) {
+ print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+ }
+ compile_queue->remove_and_mark_stale(max_task);
+ max_method->clear_queued_for_compilation();
+ return NULL;
+ }
+
+ if (PrintTieredEvents) {
+ print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+ }
+ }
+
+ return max_task;
+}
+
+void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
+ for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
+ if (PrintTieredEvents) {
+ methodHandle mh(sd->method());
+ print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
+ }
+ MethodData* mdo = sd->method()->method_data();
+ if (mdo != NULL) {
+ mdo->reset_start_counters();
+ }
+ if (sd->is_top()) break;
+ }
+}
+
+nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
+ int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
+ if (comp_level == CompLevel_none &&
+ JvmtiExport::can_post_interpreter_events() &&
+ thread->is_interp_only_mode()) {
+ return NULL;
+ }
+ if (ReplayCompiles) {
+ // Don't trigger other compiles in testing mode
+ return NULL;
+ }
+
+ handle_counter_overflow(method());
+ if (method() != inlinee()) {
+ handle_counter_overflow(inlinee());
+ }
+
+ if (PrintTieredEvents) {
+ print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
+ }
+
+ if (bci == InvocationEntryBci) {
+ method_invocation_event(method, inlinee, comp_level, nm, thread);
+ } else {
+ // method == inlinee if the event originated in the main method
+ method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
+ // Check if event led to a higher level OSR compilation
+ CompLevel expected_comp_level = comp_level;
+ if (inlinee->is_not_osr_compilable(expected_comp_level)) {
+ // It's not possible to reach the expected level so fall back to simple.
+ expected_comp_level = CompLevel_simple;
+ }
+ nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
+ assert(osr_nm == NULL || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
+ if (osr_nm != NULL) {
+ // Perform OSR with new nmethod
+ return osr_nm;
+ }
+ }
+ return NULL;
+}
+
+// Check if the method can be compiled, change level if necessary
+void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
+ assert(level <= TieredStopAtLevel, "Invalid compilation level");
+ if (level == CompLevel_none) {
+ return;
+ }
+ if (level == CompLevel_aot) {
+ if (mh->has_aot_code()) {
+ if (PrintTieredEvents) {
+ print_event(COMPILE, mh, mh, bci, level);
+ }
+ MutexLocker ml(Compile_lock);
+ NoSafepointVerifier nsv;
+ if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
+ mh->aot_code()->make_entrant();
+ if (mh->has_compiled_code()) {
+ mh->code()->make_not_entrant();
+ }
+ MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ Method::set_code(mh, mh->aot_code());
+ }
+ }
+ return;
+ }
+
+ // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
+ // in the interpreter and then compile with C2 (the transition function will request that,
+ // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with
+ // pure C1.
+ if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
+ if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
+ compile(mh, bci, CompLevel_simple, thread);
+ }
+ return;
+ }
+ if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
+ if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
+ nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
+ if (osr_nm != NULL && osr_nm->comp_level() > CompLevel_simple) {
+ // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
+ osr_nm->make_not_entrant();
+ }
+ compile(mh, bci, CompLevel_simple, thread);
+ }
+ return;
+ }
+ if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
+ return;
+ }
+ if (!CompileBroker::compilation_is_in_queue(mh)) {
+ if (PrintTieredEvents) {
+ print_event(COMPILE, mh, mh, bci, level);
+ }
+ submit_compile(mh, bci, level, thread);
+ }
+}
+
+// Update the rate and submit compile
+void TieredThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
+ int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
+ update_rate(os::javaTimeMillis(), mh());
+ CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
+}
+
+// Print an event.
+void TieredThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh,
+ int bci, CompLevel level) {
+ tty->print(" rate=");
+ if (mh->prev_time() == 0) tty->print("n/a");
+ else tty->print("%f", mh->rate());
+
+ tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
+ threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
+
+}
+
+// update_rate() is called from select_task() while holding a compile queue lock.
+void TieredThresholdPolicy::update_rate(jlong t, Method* m) {
+ // Skip update if counters are absent.
+ // Can't allocate them since we are holding compile queue lock.
+ if (m->method_counters() == NULL) return;
+
+ if (is_old(m)) {
+ // We don't remove old methods from the queue,
+ // so we can just zero the rate.
+ m->set_rate(0);
+ return;
+ }
+
+ // We don't update the rate if we've just come out of a safepoint.
+ // delta_s is the time since last safepoint in milliseconds.
+ jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+ jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
+ // How many events were there since the last time?
+ int event_count = m->invocation_count() + m->backedge_count();
+ int delta_e = event_count - m->prev_event_count();
+
+ // We should be running for at least 1ms.
+ if (delta_s >= TieredRateUpdateMinTime) {
+ // And we must've taken the previous point at least 1ms before.
+ if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
+ m->set_prev_time(t);
+ m->set_prev_event_count(event_count);
+ m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
+ } else {
+ if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
+ // If nothing happened for 25ms, zero the rate. Don't modify prev values.
+ m->set_rate(0);
+ }
+ }
+ }
+}
+
+// Check if this method has been stale for a given number of milliseconds.
+// See select_task().
+bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
+ jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+ jlong delta_t = t - m->prev_time();
+ if (delta_t > timeout && delta_s > timeout) {
+ int event_count = m->invocation_count() + m->backedge_count();
+ int delta_e = event_count - m->prev_event_count();
+ // Return true if there were no events.
+ return delta_e == 0;
+ }
+ return false;
+}
+
+// We don't remove old methods from the compile queue even if they have
+// very low activity. See select_task().
+bool TieredThresholdPolicy::is_old(Method* method) {
+ return method->invocation_count() > 50000 || method->backedge_count() > 500000;
+}
+
+double TieredThresholdPolicy::weight(Method* method) {
+ return (double)(method->rate() + 1) *
+ (method->invocation_count() + 1) * (method->backedge_count() + 1);
+}
+
+// Apply heuristics and return true if x should be compiled before y
+bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) {
+ if (x->highest_comp_level() > y->highest_comp_level()) {
+ // recompilation after deopt
+ return true;
+ } else
+ if (x->highest_comp_level() == y->highest_comp_level()) {
+ if (weight(x) > weight(y)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Is method profiled enough?
+bool TieredThresholdPolicy::is_method_profiled(Method* method) {
+ MethodData* mdo = method->method_data();
+ if (mdo != NULL) {
+ int i = mdo->invocation_count_delta();
+ int b = mdo->backedge_count_delta();
+ return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
+ }
+ return false;
+}
+
+double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
+ double queue_size = CompileBroker::queue_size(level);
+ int comp_count = compiler_count(level);
+ double k = queue_size / (feedback_k * comp_count) + 1;
+
+ // Increase C1 compile threshold when the code cache is filled more
+ // than specified by IncreaseFirstTierCompileThresholdAt percentage.
+ // The main intention is to keep enough free space for C2 compiled code
+ // to achieve peak performance if the code cache is under stress.
+ if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
+ double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
+ if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
+ k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
+ }
+ }
+ return k;
+}
+
+// Call and loop predicates determine whether a transition to a higher
+// compilation level should be performed (pointers to predicate functions
+// are passed to common()).
+// Tier?LoadFeedback is basically a coefficient that determines
+// how many methods per compiler thread can be in the queue before
+// the threshold values double.
+bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
+ switch(cur_level) {
+ case CompLevel_aot: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
+ }
+ case CompLevel_none:
+ case CompLevel_limited_profile: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return loop_predicate_helper<CompLevel_none>(i, b, k, method);
+ }
+ case CompLevel_full_profile: {
+ double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
+ return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+ }
+ default:
+ return true;
+ }
+}
+
+bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
+ switch(cur_level) {
+ case CompLevel_aot: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return call_predicate_helper<CompLevel_aot>(i, b, k, method);
+ }
+ case CompLevel_none:
+ case CompLevel_limited_profile: {
+ double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+ return call_predicate_helper<CompLevel_none>(i, b, k, method);
+ }
+ case CompLevel_full_profile: {
+ double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
+ return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+ }
+ default:
+ return true;
+ }
+}
+
+// Determine if a method is mature.
+bool TieredThresholdPolicy::is_mature(Method* method) {
+ if (should_compile_at_level_simple(method)) return true;
+ MethodData* mdo = method->method_data();
+ if (mdo != NULL) {
+ int i = mdo->invocation_count();
+ int b = mdo->backedge_count();
+ double k = ProfileMaturityPercentage / 100.0;
+ return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
+ loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+ }
+ return false;
+}
+
+// If a method is old enough and is still in the interpreter we would want to
+// start profiling without waiting for the compiled method to arrive.
+// We also take the load on compilers into account.
+bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
+ if (cur_level == CompLevel_none &&
+ CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+ int i = method->invocation_count();
+ int b = method->backedge_count();
+ double k = Tier0ProfilingStartPercentage / 100.0;
+ return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
+ }
+ return false;
+}
+
+// Inlining control: if we're compiling a profiled method with C1 and the callee
+// is known to have OSRed in a C2 version, don't inline it.
+bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
+ CompLevel comp_level = (CompLevel)env->comp_level();
+ if (comp_level == CompLevel_full_profile ||
+ comp_level == CompLevel_limited_profile) {
+ return callee->highest_osr_comp_level() == CompLevel_full_optimization;
+ }
+ return false;
+}
+
+// Create MDO if necessary.
+void TieredThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
+ if (mh->is_native() ||
+ mh->is_abstract() ||
+ mh->is_accessor() ||
+ mh->is_constant_getter()) {
+ return;
+ }
+ if (mh->method_data() == NULL) {
+ Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
+ }
+}
+
+
+/*
+ * Method states:
+ * 0 - interpreter (CompLevel_none)
+ * 1 - pure C1 (CompLevel_simple)
+ * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
+ * 3 - C1 with full profiling (CompLevel_full_profile)
+ * 4 - C2 (CompLevel_full_optimization)
+ *
+ * Common state transition patterns:
+ * a. 0 -> 3 -> 4.
+ * The most common path. But note that even in this straightforward case
+ * profiling can start at level 0 and finish at level 3.
+ *
+ * b. 0 -> 2 -> 3 -> 4.
+ * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
+ * into state 3 directly and over-profiling while a method is in the C2 queue we transition to
+ * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
+ *
+ * c. 0 -> (3->2) -> 4.
+ * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
+ * to enable the profiling to fully occur at level 0. In this case we change the compilation level
+ * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
+ * without full profiling while c2 is compiling.
+ *
+ * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
+ * After a method was once compiled with C1 it can be identified as trivial and be compiled to
+ * level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
+ *
+ * e. 0 -> 4.
+ * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
+ * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
+ * the compiled version already exists).
+ *
+ * Note that since state 0 can be reached from any other state via deoptimization different loops
+ * are possible.
+ *
+ */
+
+// Common transition function. Given a predicate determines if a method should transition to another level.
+CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
+ CompLevel next_level = cur_level;
+ int i = method->invocation_count();
+ int b = method->backedge_count();
+
+ if (should_compile_at_level_simple(method)) {
+ next_level = CompLevel_simple;
+ } else {
+ switch(cur_level) {
+ default: break;
+ case CompLevel_aot: {
+ // If we were at full profile level, would we switch to full opt?
+ if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
+ next_level = CompLevel_full_optimization;
+ } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+ (this->*p)(i, b, cur_level, method))) {
+ next_level = CompLevel_full_profile;
+ }
+ }
+ break;
+ case CompLevel_none:
+ // If we were at full profile level, would we switch to full opt?
+ if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
+ next_level = CompLevel_full_optimization;
+ } else if ((this->*p)(i, b, cur_level, method)) {
+#if INCLUDE_JVMCI
+ if (EnableJVMCI && UseJVMCICompiler) {
+ // Since JVMCI takes a while to warm up, its queue inevitably backs up during
+ // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
+ // compilation method and all potential inlinees have mature profiles (which
+ // includes type profiling). If it sees immature profiles, JVMCI's inliner
+ // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to
+ // exploring/inlining too many graphs). Since a rewrite of the inliner is
+ // in progress, we simply disable the dialing back heuristic for now and will
+ // revisit this decision once the new inliner is completed.
+ next_level = CompLevel_full_profile;
+ } else
+#endif
+ {
+ // C1-generated fully profiled code is about 30% slower than the limited profile
+ // code that has only invocation and backedge counters. The observation is that
+ // if C2 queue is large enough we can spend too much time in the fully profiled code
+ // while waiting for C2 to pick the method from the queue. To alleviate this problem
+ // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
+ // we choose to compile a limited profiled version and then recompile with full profiling
+ // when the load on C2 goes down.
+ if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
+ Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+ next_level = CompLevel_limited_profile;
+ } else {
+ next_level = CompLevel_full_profile;
+ }
+ }
+ }
+ break;
+ case CompLevel_limited_profile:
+ if (is_method_profiled(method)) {
+ // Special case: we got here because this method was fully profiled in the interpreter.
+ next_level = CompLevel_full_optimization;
+ } else {
+ MethodData* mdo = method->method_data();
+ if (mdo != NULL) {
+ if (mdo->would_profile()) {
+ if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+ (this->*p)(i, b, cur_level, method))) {
+ next_level = CompLevel_full_profile;
+ }
+ } else {
+ next_level = CompLevel_full_optimization;
+ }
+ } else {
+ // If there is no MDO we need to profile
+ if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
+ Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
+ (this->*p)(i, b, cur_level, method))) {
+ next_level = CompLevel_full_profile;
+ }
+ }
+ }
+ break;
+ case CompLevel_full_profile:
+ {
+ MethodData* mdo = method->method_data();
+ if (mdo != NULL) {
+ if (mdo->would_profile()) {
+ int mdo_i = mdo->invocation_count_delta();
+ int mdo_b = mdo->backedge_count_delta();
+ if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
+ next_level = CompLevel_full_optimization;
+ }
+ } else {
+ next_level = CompLevel_full_optimization;
+ }
+ }
+ }
+ break;
+ }
+ }
+ return MIN2(next_level, (CompLevel)TieredStopAtLevel);
+}
+
+// Determine if a method should be compiled with a normal entry point at a different level.
+CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) {
+ CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
+ common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true));
+ CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level);
+
+ // If OSR method level is greater than the regular method level, the levels should be
+ // equalized by raising the regular method level in order to avoid OSRs during each
+ // invocation of the method.
+ if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
+ MethodData* mdo = method->method_data();
+ guarantee(mdo != NULL, "MDO should not be NULL");
+ if (mdo->invocation_count() >= 1) {
+ next_level = CompLevel_full_optimization;
+ }
+ } else {
+ next_level = MAX2(osr_level, next_level);
+ }
+ return next_level;
+}
+
+// Determine if we should do an OSR compilation of a given method.
+CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) {
+ CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true);
+ if (cur_level == CompLevel_none) {
+ // If there is a live OSR method that means that we deopted to the interpreter
+ // for the transition.
+ CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
+ if (osr_level > CompLevel_none) {
+ return osr_level;
+ }
+ }
+ return next_level;
+}
+
+bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
+ if (UseAOT) {
+ if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
+ // If the current level is full profile or interpreter and we're switching to any other level,
+ // activate the AOT code back first so that we won't waste time overprofiling.
+ compile(mh, InvocationEntryBci, CompLevel_aot, thread);
+ // Fall through for JIT compilation.
+ }
+ if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
+ // If the next level is limited profile, use the aot code (if there is any),
+ // since it's essentially the same thing.
+ compile(mh, InvocationEntryBci, CompLevel_aot, thread);
+ // No need to JIT, we're done.
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// Handle the invocation event.
+void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
+ CompLevel level, CompiledMethod* nm, JavaThread* thread) {
+ if (should_create_mdo(mh(), level)) {
+ create_mdo(mh, thread);
+ }
+ CompLevel next_level = call_event(mh(), level, thread);
+ if (next_level != level) {
+ if (maybe_switch_to_aot(mh, level, next_level, thread)) {
+ // No JITting necessary
+ return;
+ }
+ if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, next_level, thread);
+ }
+ }
+}
+
+// Handle the back branch event. Notice that we can compile the method
+// with a regular entry from here.
+void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
+ int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
+ if (should_create_mdo(mh(), level)) {
+ create_mdo(mh, thread);
+ }
+ // Check if MDO should be created for the inlined method
+ if (should_create_mdo(imh(), level)) {
+ create_mdo(imh, thread);
+ }
+
+ if (is_compilation_enabled()) {
+ CompLevel next_osr_level = loop_event(imh(), level, thread);
+ CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
+ // At the very least compile the OSR version
+ if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
+ compile(imh, bci, next_osr_level, thread);
+ }
+
+ // Use loop event as an opportunity to also check if there's been
+ // enough calls.
+ CompLevel cur_level, next_level;
+ if (mh() != imh()) { // If there is an enclosing method
+ if (level == CompLevel_aot) {
+ // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
+ if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
+ }
+ } else {
+ // Current loop event level is not AOT
+ guarantee(nm != NULL, "Should have nmethod here");
+ cur_level = comp_level(mh());
+ next_level = call_event(mh(), cur_level, thread);
+
+ if (max_osr_level == CompLevel_full_optimization) {
+ // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
+ bool make_not_entrant = false;
+ if (nm->is_osr_method()) {
+ // This is an osr method, just make it not entrant and recompile later if needed
+ make_not_entrant = true;
+ } else {
+ if (next_level != CompLevel_full_optimization) {
+ // next_level is not full opt, so we need to recompile the
+ // enclosing method without the inlinee
+ cur_level = CompLevel_none;
+ make_not_entrant = true;
+ }
+ }
+ if (make_not_entrant) {
+ if (PrintTieredEvents) {
+ int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
+ print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
+ }
+ nm->make_not_entrant();
+ }
+ }
+ // Fix up next_level if necessary to avoid deopts
+ if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
+ next_level = CompLevel_full_profile;
+ }
+ if (cur_level != next_level) {
+ if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, next_level, thread);
+ }
+ }
+ }
+ } else {
+ cur_level = comp_level(mh());
+ next_level = call_event(mh(), cur_level, thread);
+ if (next_level != cur_level) {
+ if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
+ compile(mh, InvocationEntryBci, next_level, thread);
+ }
+ }
+ }
+ }
+}
+
+#endif
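
As an illustration of the arithmetic in call_predicate_helper() and threshold_scale() in the file above, here is a standalone C++ sketch. The threshold and feedback values are made-up sample numbers, not the HotSpot defaults.

    #include <cstdio>

    // Sample stand-ins for Tier3InvocationThreshold, Tier3MinInvocationThreshold,
    // Tier3CompileThreshold and Tier3LoadFeedback.
    const int    kInvocationThreshold    = 200;
    const int    kMinInvocationThreshold = 100;
    const int    kCompileThreshold       = 2000;
    const double kLoadFeedback           = 5.0;

    // Mirrors threshold_scale(): s = queue_size / (LoadFeedback * compiler_count) + 1.
    double scale(double queue_size, int compiler_count) {
      return queue_size / (kLoadFeedback * compiler_count) + 1;
    }

    // Mirrors the CompLevel_none/limited_profile branch of call_predicate_helper():
    // i >= Invocation * s || (i >= MinInvocation * s && i + b >= Compile * s).
    bool tier3_call_predicate(int i, int b, double s) {
      return (i >= kInvocationThreshold * s) ||
             (i >= kMinInvocationThreshold * s && i + b >= kCompileThreshold * s);
    }

    int main() {
      double s_idle = scale(0, 2);    // empty queue, 2 compiler threads -> s = 1
      double s_busy = scale(30, 2);   // 30 queued methods -> s = 4, thresholds quadruple

      // 150 invocations + 1900 backedges fire the predicate when idle (150+1900 >= 2000),
      // but not when the queue is congested (all thresholds scaled by 4).
      std::printf("idle: %d\n", (int)tier3_call_predicate(150, 1900, s_idle)); // 1
      std::printf("busy: %d\n", (int)tier3_call_predicate(150, 1900, s_busy)); // 0
      return 0;
    }
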
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/compiler/tieredThresholdPolicy.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
+#define SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
+
+#include "code/nmethod.hpp"
+#include "compiler/compilationPolicy.hpp"
+#include "oops/methodData.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#ifdef TIERED
+
+class CompileTask;
+class CompileQueue;
+/*
+ * The system supports 5 execution levels:
+ * * level 0 - interpreter
+ * * level 1 - C1 with full optimization (no profiling)
+ * * level 2 - C1 with invocation and backedge counters
+ * * level 3 - C1 with full profiling (level 2 + MDO)
+ * * level 4 - C2
+ *
+ * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters
+ * (invocation counters and backedge counters). The frequency of these notifications is
+ * different at each level. These notifications are used by the policy to decide what transition
+ * to make.
+ *
+ * Execution starts at level 0 (interpreter), then the policy can decide either to compile the
+ * method at level 3 or level 2. The decision is based on the following factors:
+ * 1. The length of the C2 queue determines the next level. The observation is that level 2
+ * is generally faster than level 3 by about 30%, therefore we would want to minimize the time
+ * a method spends at level 3. We should only spend the time at level 3 that is necessary to get
+ * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to
+ * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile
+ * request makes its way through the long queue. When the load on C2 recedes we are going to
+ * recompile at level 3 and start gathering profiling information.
+ * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce
+ * additional filtering if the compiler is overloaded. The rationale is that by the time a
+ * method gets compiled it can become unused, so it doesn't make sense to put too much onto the
+ * queue.
+ *
+ * After profiling is completed at level 3 the transition is made to level 4. Again, the length
+ * of the C2 queue is used as a feedback to adjust the thresholds.
+ *
+ * After the first C1 compile some basic information is determined about the code like the number
+ * of the blocks and the number of the loops. Based on that it can be decided that a method
+ * is trivial and compiling it with C1 will yield the same code. In this case the method is
+ * compiled at level 1 instead of 4.
+ *
+ * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of
+ * the code and the C2 queue is sufficiently small we can decide to start profiling in the
+ * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
+ * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2
+ * version is compiled instead in order to run faster while waiting for a level 4 version.
+ *
+ * Compile queues are implemented as priority queues - for each method in the queue we compute
+ * the event rate (the number of invocation and backedge counter increments per unit of time).
+ * When getting an element off the queue we pick the one with the largest rate. Maintaining the
+ * rate also allows us to remove stale methods (the ones that got on the queue but stopped
+ * being used shortly after that).
+*/
+
+/* Command line options:
+ * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method
+ * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
+ * makes a call into the runtime.
+ *
+ * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
+ * compilation thresholds.
+ * Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
+ * Other thresholds work as follows:
+ *
+ * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when
+ * the following predicate is true (X is the level):
+ *
+ * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s),
+ *
+ * where i is the number of method invocations, b is the number of backedges and s is the scaling
+ * coefficient that will be discussed further.
+ * The intuition is to equalize the time that is spent profiling each method.
+ * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
+ * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
+ * from Method* and for 3->4 transition they come from MDO (since profiled invocations are
+ * counted separately). Finally, if a method does not contain anything worth profiling, a transition
+ * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
+ * what is specified by Tier4InvocationThreshold).
+ *
+ * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
+ *
+ * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending
+ * on the compiler load. The scaling coefficients are computed as follows:
+ *
+ * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1,
+ *
+ * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X
+ * is the number of level X compiler threads.
+ *
+ * Basically these parameters describe how many methods should be in the compile queue
+ * per compiler thread before the scaling coefficient increases by one.
+ *
+ * This feedback provides the mechanism to automatically control the flow of compilation requests
+ * depending on the machine speed, mutator load and other external factors.
+ *
+ * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop.
+ * Consider the following observation: a method compiled with full profiling (level 3)
+ * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO).
+ * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue
+ * gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues
+ * executing at level 3 for a much longer time than the predicate requires, and at suboptimal speed.
+ * The idea is to dynamically change the behavior of the system in such a way that if a substantial
+ * load on C2 is detected we would first do the 0->2 transition, allowing a method to run faster,
+ * and then allow 2->3 transitions once the load decreases.
+ *
+ * Tier3Delay* parameters control this switching mechanism.
+ * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy
+ * no longer does 0->3 transitions but does 0->2 transitions instead.
+ * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue
+ * per compiler thread falls below the specified amount.
+ * The hysteresis is necessary to avoid jitter.
+ *
+ * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue.
+ * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to
+ * compile from the compile queue, we also can detect stale methods for which the rate has been
+ * 0 for some time in the same iteration. Stale methods can appear in the queue when an application
+ * abruptly changes its behavior.
+ *
+ * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking
+ * to a given level. For example, it's useful to set TieredStopAtLevel = 1 in order to compile everything
+ * with pure c1.
+ *
+ * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the
+ * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the
+ * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled
+ * version in time. This reduces the overall transition to level 4 and decreases the startup time.
+ * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long
+ * there is no reason to start profiling prematurely.
+ *
+ * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation.
+ * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered
+ * to be zero if no events occurred in TieredRateUpdateMaxTime.
+ */
+
+class TieredThresholdPolicy : public CompilationPolicy {
+ jlong _start_time;
+ int _c1_count, _c2_count;
+
+ // Check if the counter is big enough and set carry (effectively infinity).
+ inline void set_carry_if_necessary(InvocationCounter *counter);
+ // Set carry flags in the counters (in Method* and MDO).
+ inline void handle_counter_overflow(Method* method);
+ // Call and loop predicates determine whether a transition to a higher compilation
+ // level should be performed (pointers to predicate functions are passed to common()).
+ // Predicates also take compiler load into account.
+ typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+ bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+ bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
+ // Common transition function. Given a predicate determines if a method should transition to another level.
+ CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
+ // Transition functions.
+ // call_event determines if a method should be compiled at a different
+ // level with a regular invocation entry.
+ CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread);
+ // loop_event checks if a method should be OSR compiled at a different
+ // level.
+ CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread);
+ void print_counters(const char* prefix, const methodHandle& mh);
+ // Has a method been around for a long time?
+ // We don't remove old methods from the compile queue even if they have
+ // very low activity (see select_task()).
+ inline bool is_old(Method* method);
+ // Was a given method inactive for a given number of milliseconds.
+ // If it is, we would remove it from the queue (see select_task()).
+ inline bool is_stale(jlong t, jlong timeout, Method* m);
+ // Compute the weight of the method for the compilation scheduling
+ inline double weight(Method* method);
+ // Apply heuristics and return true if x should be compiled before y
+ inline bool compare_methods(Method* x, Method* y);
+ // Compute event rate for a given method. The rate is the number of event (invocations + backedges)
+ // per millisecond.
+ inline void update_rate(jlong t, Method* m);
+ // Compute threshold scaling coefficient
+ inline double threshold_scale(CompLevel level, int feedback_k);
+ // If a method is old enough and is still in the interpreter we would want to
+ // start profiling without waiting for the compiled method to arrive. This function
+ // determines whether we should do that.
+ inline bool should_create_mdo(Method* method, CompLevel cur_level);
+ // Create MDO if necessary.
+ void create_mdo(const methodHandle& mh, JavaThread* thread);
+ // Is method profiled enough?
+ bool is_method_profiled(Method* method);
+
+ double _increase_threshold_at_ratio;
+
+ bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread);
+
+ int c1_count() const { return _c1_count; }
+ int c2_count() const { return _c2_count; }
+ void set_c1_count(int x) { _c1_count = x; }
+ void set_c2_count(int x) { _c2_count = x; }
+
+ enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
+ void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level);
+ // Print policy-specific information if necessary
+ void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level);
+ // Check if the method can be compiled, change level if necessary
+ void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
+ // Submit a given method for compilation
+ void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
+ // Simple methods perform as well compiled with C1 as with C2.
+ // This function tells whether a given method is one of them.
+ inline static bool is_trivial(Method* method);
+ // Force method to be compiled at CompLevel_simple?
+ inline static bool should_compile_at_level_simple(Method* method);
+
+ // Predicate helpers are used by .*_predicate() methods as well as others.
+ // They check the given counter values, multiplied by the scale, against the thresholds.
+ template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
+ template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
+
+ // Get a compilation level for a given method.
+ static CompLevel comp_level(Method* method);
+ void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
+ CompLevel level, CompiledMethod* nm, JavaThread* thread);
+ void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
+ int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread);
+
+ void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
+ void set_start_time(jlong t) { _start_time = t; }
+ jlong start_time() const { return _start_time; }
+
+public:
+ TieredThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { }
+ virtual int compiler_count(CompLevel comp_level) {
+ if (is_c1_compile(comp_level)) return c1_count();
+ if (is_c2_compile(comp_level)) return c2_count();
+ return 0;
+ }
+ virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); }
+ virtual void do_safepoint_work() { }
+ virtual void delay_compilation(Method* method) { }
+ virtual void disable_compilation(Method* method) { }
+ virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
+ virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee,
+ int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
+ // select_task() is called by the CompileBroker; it should return a task or NULL.
+ virtual CompileTask* select_task(CompileQueue* compile_queue);
+ // Tell the runtime if we think a given method is adequately profiled.
+ virtual bool is_mature(Method* method);
+ // Initialize: set compiler thread count
+ virtual void initialize();
+ virtual bool should_not_inline(ciEnv* env, ciMethod* callee);
+};
+
+#endif // TIERED
+
+#endif // SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
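
The predicate helpers declared above compare the invocation and backedge counters against per-level thresholds that have been multiplied by a scaling coefficient. A standalone sketch of that comparison, with hypothetical constants standing in for the real Tier* threshold flags (this only illustrates the shape of the check, not the exact HotSpot thresholds):

#include <cstdio>

// Hypothetical stand-ins for the Tier3 threshold flags; the real values come
// from -XX:Tier3InvocationThreshold, -XX:Tier3MinInvocationThreshold and
// -XX:Tier3CompileThreshold.
static const int kInvocationThreshold    = 200;
static const int kMinInvocationThreshold = 100;
static const int kCompileThreshold       = 2000;

// Mirrors the shape of the call predicate check: either the invocation count
// alone is large enough, or the combined invocation + backedge count is,
// with every threshold stretched by the scaling coefficient.
static bool call_predicate_sketch(int i, int b, double scale) {
  return (i >= kInvocationThreshold * scale) ||
         ((i >= kMinInvocationThreshold * scale) &&
          (i + b >= kCompileThreshold * scale));
}

int main() {
  // With scale 1.0, 150 invocations plus 1900 backedges pass the check.
  std::printf("%d\n", call_predicate_sketch(150, 1900, 1.0));
  // A larger scale (e.g. when the compile queues are loaded) defers it.
  std::printf("%d\n", call_predicate_sketch(150, 1900, 2.0));
  return 0;
}
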
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -78,6 +78,8 @@
_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_prev_collection_pause_end_ms(0.0),
_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
+ _concurrent_refine_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+ _logged_cards_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_logged_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -102,6 +104,10 @@
int index = MIN2(ParallelGCThreads - 1, 7u);
_rs_length_diff_seq->add(rs_length_diff_defaults[index]);
+ // Start with inverse of maximum STW cost.
+ _concurrent_refine_rate_ms_seq->add(1/cost_per_logged_card_ms_defaults[0]);
+ // Some applications have very low rates for logging cards.
+ _logged_cards_rate_ms_seq->add(0.0);
_cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms_defaults[index]);
_cost_scan_hcc_seq->add(0.0);
_young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
@@ -159,6 +165,14 @@
(pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
}
+void G1Analytics::report_concurrent_refine_rate_ms(double cards_per_ms) {
+ _concurrent_refine_rate_ms_seq->add(cards_per_ms);
+}
+
+void G1Analytics::report_logged_cards_rate_ms(double cards_per_ms) {
+ _logged_cards_rate_ms_seq->add(cards_per_ms);
+}
+
void G1Analytics::report_cost_per_logged_card_ms(double cost_per_logged_card_ms) {
_cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms);
}
@@ -223,6 +237,14 @@
return get_new_prediction(_alloc_rate_ms_seq);
}
+double G1Analytics::predict_concurrent_refine_rate_ms() const {
+ return get_new_prediction(_concurrent_refine_rate_ms_seq);
+}
+
+double G1Analytics::predict_logged_cards_rate_ms() const {
+ return get_new_prediction(_logged_cards_rate_ms_seq);
+}
+
double G1Analytics::predict_cost_per_logged_card_ms() const {
return get_new_prediction(_cost_per_logged_card_ms_seq);
}
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -46,6 +46,8 @@
double _prev_collection_pause_end_ms;
TruncatedSeq* _rs_length_diff_seq;
+ TruncatedSeq* _concurrent_refine_rate_ms_seq;
+ TruncatedSeq* _logged_cards_rate_ms_seq;
TruncatedSeq* _cost_per_logged_card_ms_seq;
TruncatedSeq* _cost_scan_hcc_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
@@ -99,6 +101,8 @@
void report_concurrent_mark_remark_times_ms(double ms);
void report_concurrent_mark_cleanup_times_ms(double ms);
void report_alloc_rate_ms(double alloc_rate);
+ void report_concurrent_refine_rate_ms(double cards_per_ms);
+ void report_logged_cards_rate_ms(double cards_per_ms);
void report_cost_per_logged_card_ms(double cost_per_logged_card_ms);
void report_cost_scan_hcc(double cost_scan_hcc);
void report_cost_per_remset_card_ms(double cost_per_remset_card_ms, bool for_young_gc);
@@ -116,6 +120,8 @@
double predict_alloc_rate_ms() const;
int num_alloc_rate_ms() const;
+ double predict_concurrent_refine_rate_ms() const;
+ double predict_logged_cards_rate_ms() const;
double predict_cost_per_logged_card_ms() const;
double predict_scan_hcc_ms() const;
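
Both new sequences follow the same report/predict pattern as the other G1Analytics statistics: a rate sample is added after each GC and a smoothed prediction is read back when the next pause is planned. A minimal standalone sketch of that cycle, using a simple windowed average as a stand-in for TruncatedSeq (the real class also folds variance into the prediction):

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-in for TruncatedSeq: keeps the last N samples and predicts
// their mean.
class RateSeq {
  std::vector<double> _samples;
  std::size_t _max;
public:
  explicit RateSeq(std::size_t max) : _max(max) {}
  void add(double v) {
    if (_samples.size() == _max) {
      _samples.erase(_samples.begin());
    }
    _samples.push_back(v);
  }
  double predict() const {
    if (_samples.empty()) {
      return 0.0;
    }
    double sum = 0.0;
    for (double v : _samples) {
      sum += v;
    }
    return sum / _samples.size();
  }
};

int main() {
  RateSeq concurrent_refine_rate_ms(10);
  // Reported after each GC: cards refined since the previous GC divided by
  // the concurrent refinement time spent since then, in milliseconds.
  concurrent_refine_rate_ms.add(1500.0);
  concurrent_refine_rate_ms.add(1800.0);
  std::printf("predicted cards/ms: %.1f\n", concurrent_refine_rate_ms.predict());
  return 0;
}
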
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -409,7 +409,7 @@
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
- size_t pending_cards = _policy->pending_cards();
+ size_t pending_cards = _policy->pending_cards_at_gc_start();
double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -412,6 +412,22 @@
dcqs.notify_if_necessary();
}
+G1ConcurrentRefine::RefinementStats G1ConcurrentRefine::total_refinement_stats() const {
+ struct CollectData : public ThreadClosure {
+ Tickspan _total_time;
+ size_t _total_cards;
+ CollectData() : _total_time(), _total_cards(0) {}
+ virtual void do_thread(Thread* t) {
+ G1ConcurrentRefineThread* crt = static_cast<G1ConcurrentRefineThread*>(t);
+ _total_time += crt->total_refinement_time();
+ _total_cards += crt->total_refined_cards();
+ }
+ } collector;
+ // Cast away const so we can call non-modifying closure on threads.
+ const_cast<G1ConcurrentRefine*>(this)->threads_do(&collector);
+ return RefinementStats(collector._total_time, collector._total_cards);
+}
+
size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
return activation_level(thresholds);
@@ -432,7 +448,8 @@
}
}
-bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
+bool G1ConcurrentRefine::do_refinement_step(uint worker_id,
+ size_t* total_refined_cards) {
G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
size_t curr_cards = dcqs.num_cards();
@@ -448,5 +465,6 @@
// Process the next buffer, if there are enough left.
return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
- deactivation_threshold(worker_id));
+ deactivation_threshold(worker_id),
+ total_refined_cards);
}
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/ticks.hpp"
// Forward decl
class G1ConcurrentRefine;
@@ -118,11 +119,22 @@
// Adjust refinement thresholds based on work done during the pause and the goal time.
void adjust(double logged_cards_scan_time, size_t processed_logged_cards, double goal_ms);
+ struct RefinementStats {
+ Tickspan _time;
+ size_t _cards;
+ RefinementStats(Tickspan time, size_t cards) : _time(time), _cards(cards) {}
+ };
+
+ RefinementStats total_refinement_stats() const;
+
// Cards in the dirty card queue set.
size_t activation_threshold(uint worker_id) const;
size_t deactivation_threshold(uint worker_id) const;
- // Perform a single refinement step. Called by the refinement threads when woken up.
- bool do_refinement_step(uint worker_id);
+
+ // Perform a single refinement step; called by the refinement
+ // threads. Returns true if there was refinement work available.
+ // Increments *total_refined_cards.
+ bool do_refinement_step(uint worker_id, size_t* total_refined_cards);
// Iterate over all concurrent refinement threads applying the given closure.
void threads_do(ThreadClosure *tc);
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -37,6 +37,8 @@
ConcurrentGCThread(),
_vtime_start(0.0),
_vtime_accum(0.0),
+ _total_refinement_time(),
+ _total_refined_cards(0),
_worker_id(worker_id),
_active(false),
_monitor(NULL),
@@ -101,11 +103,12 @@
break;
}
- size_t buffers_processed = 0;
log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _cr->activation_threshold(_worker_id),
G1BarrierSet::dirty_card_queue_set().num_cards());
+ size_t start_total_refined_cards = _total_refined_cards; // For logging.
+
{
SuspendibleThreadSetJoiner sts_join;
@@ -115,20 +118,22 @@
continue; // Re-check for termination after yield delay.
}
- if (!_cr->do_refinement_step(_worker_id)) {
- break;
+ Ticks start_time = Ticks::now();
+ if (!_cr->do_refinement_step(_worker_id, &_total_refined_cards)) {
+ break; // No cards to process.
}
- ++buffers_processed;
+ _total_refinement_time += (Ticks::now() - start_time);
}
}
deactivate();
log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
- ", current: " SIZE_FORMAT ", buffers processed: "
- SIZE_FORMAT,
+ ", current: " SIZE_FORMAT ", refined cards: "
+ SIZE_FORMAT ", total refined cards: " SIZE_FORMAT,
_worker_id, _cr->deactivation_threshold(_worker_id),
G1BarrierSet::dirty_card_queue_set().num_cards(),
- buffers_processed);
+ _total_refined_cards - start_total_refined_cards,
+ _total_refined_cards);
if (os::supports_vtime()) {
_vtime_accum = (os::elapsedVTime() - _vtime_start);
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#include "gc/shared/concurrentGCThread.hpp"
+#include "utilities/ticks.hpp"
// Forward Decl.
class G1ConcurrentRefine;
@@ -38,6 +39,10 @@
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Accumulated virtual time.
+
+ Tickspan _total_refinement_time;
+ size_t _total_refined_cards;
+
uint _worker_id;
bool _active;
@@ -61,6 +66,9 @@
// Activate this thread.
void activate();
+ Tickspan total_refinement_time() const { return _total_refinement_time; }
+ size_t total_refined_cards() const { return _total_refined_cards; }
+
// Total virtual time so far.
double vtime_accum() { return _vtime_accum; }
};
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -37,6 +37,7 @@
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
@@ -62,6 +63,9 @@
}
}
+// Assumed to be zero by concurrent threads.
+static uint par_ids_start() { return 0; }
+
G1DirtyCardQueueSet::G1DirtyCardQueueSet(Monitor* cbl_mon,
BufferNode::Allocator* allocator) :
PtrQueueSet(allocator),
@@ -73,15 +77,16 @@
_process_completed_buffers(false),
_max_cards(MaxCardsUnlimited),
_max_cards_padding(0),
- _free_ids(0, num_par_ids()),
- _processed_buffers_mut(0),
- _processed_buffers_rs_thread(0)
+ _free_ids(par_ids_start(), num_par_ids()),
+ _mutator_refined_cards_counters(NEW_C_HEAP_ARRAY(size_t, num_par_ids(), mtGC))
{
+ ::memset(_mutator_refined_cards_counters, 0, num_par_ids() * sizeof(size_t));
_all_active = true;
}
G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
abandon_completed_buffers();
+ FREE_C_HEAP_ARRAY(size_t, _mutator_refined_cards_counters);
}
// Determines how many mutator threads can process the buffers in parallel.
@@ -89,6 +94,14 @@
return (uint)os::initial_active_processor_count();
}
+size_t G1DirtyCardQueueSet::total_mutator_refined_cards() const {
+ size_t sum = 0;
+ for (uint i = 0; i < num_par_ids(); ++i) {
+ sum += _mutator_refined_cards_counters[i];
+ }
+ return sum;
+}
+
void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}
@@ -213,7 +226,9 @@
return result;
}
-bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node, uint worker_id) {
+bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
+ uint worker_id,
+ size_t* total_refined_cards) {
G1RemSet* rem_set = G1CollectedHeap::heap()->rem_set();
size_t size = buffer_size();
void** buffer = BufferNode::make_buffer_from_node(node);
@@ -223,6 +238,7 @@
CardTable::CardValue* cp = static_cast<CardTable::CardValue*>(buffer[i]);
rem_set->refine_card_concurrently(cp, worker_id);
}
+ *total_refined_cards += (i - node->index());
node->set_index(i);
return i == size;
}
@@ -260,25 +276,27 @@
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
- bool result = refine_buffer(node, worker_id);
+ uint counter_index = worker_id - par_ids_start();
+ size_t* counter = &_mutator_refined_cards_counters[counter_index];
+ bool result = refine_buffer(node, worker_id, counter);
_free_ids.release_par_id(worker_id); // release the id
if (result) {
assert_fully_consumed(node, buffer_size());
- Atomic::inc(&_processed_buffers_mut);
}
return result;
}
-bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, size_t stop_at) {
+bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id,
+ size_t stop_at,
+ size_t* total_refined_cards) {
BufferNode* node = get_completed_buffer(stop_at);
if (node == NULL) {
return false;
- } else if (refine_buffer(node, worker_id)) {
+ } else if (refine_buffer(node, worker_id, total_refined_cards)) {
assert_fully_consumed(node, buffer_size());
// Done with fully processed buffer.
deallocate_buffer(node);
- Atomic::inc(&_processed_buffers_rs_thread);
return true;
} else {
// Return partially processed buffer to the queue.
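
The per-id counter array introduced above is a striping pattern: a mutator that has claimed a parallel id increments its own slot without atomic operations, and the slots are summed only when a total is requested. A standalone sketch of that pattern, with the id management reduced to plain indices (the real code claims ids through G1FreeIdSet):

#include <cstddef>
#include <cstdio>
#include <cstring>

class StripedCounter {
  std::size_t* _slots;
  unsigned _count;
public:
  explicit StripedCounter(unsigned count)
    : _slots(new std::size_t[count]), _count(count) {
    std::memset(_slots, 0, count * sizeof(std::size_t));
  }
  ~StripedCounter() { delete[] _slots; }
  // The caller holds slot `id` exclusively while it owns the claimed id, so a
  // plain increment suffices; no atomic read-modify-write is needed.
  void add(unsigned id, std::size_t cards) { _slots[id] += cards; }
  // Summed lazily; a slightly stale total is acceptable for statistics.
  std::size_t total() const {
    std::size_t sum = 0;
    for (unsigned i = 0; i < _count; ++i) {
      sum += _slots[i];
    }
    return sum;
  }
};

int main() {
  StripedCounter mutator_refined_cards(4);
  mutator_refined_cards.add(0, 256);  // mutator currently holding id 0
  mutator_refined_cards.add(2, 128);  // mutator currently holding id 2
  std::printf("total mutator refined cards: %zu\n", mutator_refined_cards.total());
  return 0;
}
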
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -78,14 +78,15 @@
void abandon_completed_buffers();
- // Refine the cards in "node" from it's index to buffer_size.
+ // Refine the cards in "node" from its index to buffer_size.
// Stops processing if SuspendibleThreadSet::should_yield() is true.
// Returns true if the entire buffer was processed, false if there
// is a pending yield request. The node's index is updated to exclude
// the processed elements, e.g. up to the element before processing
// stopped, or one past the last element if the entire buffer was
- // processed.
- bool refine_buffer(BufferNode* node, uint worker_id);
+ // processed. Increments *total_refined_cards by the number of cards
+ // processed and removed from the buffer.
+ bool refine_buffer(BufferNode* node, uint worker_id, size_t* total_refined_cards);
bool mut_process_buffer(BufferNode* node);
@@ -97,10 +98,9 @@
G1FreeIdSet _free_ids;
- // The number of completed buffers processed by mutator and rs thread,
- // respectively.
- jint _processed_buffers_mut;
- jint _processed_buffers_rs_thread;
+ // Array of cumulative dirty cards refined by mutator threads.
+ // Array has an entry per id in _free_ids.
+ size_t* _mutator_refined_cards_counters;
public:
G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator);
@@ -158,7 +158,12 @@
// Stops processing a buffer if SuspendibleThreadSet::should_yield(),
// returning the incompletely processed buffer to the completed buffer
// list, for later processing of the remainder.
- bool refine_completed_buffer_concurrently(uint worker_id, size_t stop_at);
+ //
+ // Increments *total_refined_cards by the number of cards processed and
+ // removed from the buffer.
+ bool refine_completed_buffer_concurrently(uint worker_id,
+ size_t stop_at,
+ size_t* total_refined_cards);
// If a full collection is happening, reset partial logs, and release
// completed ones: the full collection will make them all irrelevant.
@@ -181,13 +186,8 @@
return _max_cards_padding;
}
- jint processed_buffers_mut() {
- return _processed_buffers_mut;
- }
- jint processed_buffers_rs_thread() {
- return _processed_buffers_rs_thread;
- }
-
+ // Total dirty cards refined by mutator threads.
+ size_t total_mutator_refined_cards() const;
};
inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const {
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -70,7 +70,11 @@
_free_regions_at_end_of_collection(0),
_max_rs_length(0),
_rs_length_prediction(0),
- _pending_cards(0),
+ _pending_cards_at_gc_start(0),
+ _pending_cards_at_prev_gc_end(0),
+ _total_mutator_refined_cards(0),
+ _total_concurrent_refined_cards(0),
+ _total_concurrent_refinement_time(),
_bytes_allocated_in_old_since_last_gc(0),
_initial_mark_to_mixed(),
_collection_set(NULL),
@@ -442,6 +446,7 @@
collector_state()->set_in_young_only_phase(false);
collector_state()->set_in_full_gc(true);
_collection_set->clear_candidates();
+ record_concurrent_refinement_data(true /* is_full_collection */);
}
void G1Policy::record_full_collection_end() {
@@ -472,12 +477,67 @@
_survivor_surv_rate_group->reset();
update_young_list_max_and_target_length();
update_rs_length_prediction();
+ _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
_bytes_allocated_in_old_since_last_gc = 0;
record_pause(FullGC, _full_collection_start_sec, end_sec);
}
+void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
+ _pending_cards_at_gc_start = _g1h->pending_card_num();
+
+ // Record info about concurrent refinement thread processing.
+ G1ConcurrentRefine* cr = _g1h->concurrent_refine();
+ G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
+
+ Tickspan cr_time = cr_stats._time - _total_concurrent_refinement_time;
+ _total_concurrent_refinement_time = cr_stats._time;
+
+ size_t cr_cards = cr_stats._cards - _total_concurrent_refined_cards;
+ _total_concurrent_refined_cards = cr_stats._cards;
+
+ // Don't update rate if full collection. We could be in an implicit full
+ // collection after a non-full collection failure, in which case there
+ // wasn't any mutator/cr-thread activity since last recording. And if
+ // we're in an explicit full collection, the time since the last GC can
+ // be arbitrarily short, so not a very good sample. Similarly, don't
+ // update the rate if the current sample is empty or time is zero.
+ if (!is_full_collection && (cr_cards > 0) && (cr_time > Tickspan())) {
+ double rate = cr_cards / (cr_time.seconds() * MILLIUNITS);
+ _analytics->report_concurrent_refine_rate_ms(rate);
+ }
+
+ // Record info about mutator thread processing.
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ size_t mut_total_cards = dcqs.total_mutator_refined_cards();
+ size_t mut_cards = mut_total_cards - _total_mutator_refined_cards;
+ _total_mutator_refined_cards = mut_total_cards;
+
+ // Record mutator's card logging rate.
+ // Don't update if full collection; see above.
+ if (!is_full_collection) {
+ size_t total_cards = _pending_cards_at_gc_start + cr_cards + mut_cards;
+ assert(_pending_cards_at_prev_gc_end <= total_cards,
+ "untracked cards: last pending: " SIZE_FORMAT
+ ", pending: " SIZE_FORMAT ", conc refine: " SIZE_FORMAT
", mut refine: " SIZE_FORMAT,
+ _pending_cards_at_prev_gc_end, _pending_cards_at_gc_start,
+ cr_cards, mut_cards);
+ size_t logged_cards = total_cards - _pending_cards_at_prev_gc_end;
+ double logging_start_time = _analytics->prev_collection_pause_end_ms();
+ double logging_end_time = Ticks::now().seconds() * MILLIUNITS;
+ double logging_time = logging_end_time - logging_start_time;
+ // Unlike above for conc-refine rate, here we should not require a
+ // non-empty sample, since an application could go some time with only
+ // young-gen or filtered out writes. But we'll ignore unusually short
+ // sample periods, as they may just pollute the predictions.
+ if (logging_time > 1.0) { // Require > 1ms sample time.
+ _analytics->report_logged_cards_rate_ms(logged_cards / logging_time);
+ }
+ }
+}
+
void G1Policy::record_collection_pause_start(double start_time_sec) {
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. so, no point is calculating this
@@ -490,7 +550,8 @@
assert_used_and_recalculate_used_equal(_g1h);
phase_times()->record_cur_collection_start_sec(start_time_sec);
- _pending_cards = _g1h->pending_card_num();
+
+ record_concurrent_refinement_data(false /* is_full_collection */);
_collection_set->reset_bytes_used_before();
_bytes_copied_during_gc = 0;
@@ -744,7 +805,7 @@
// after the mixed gc phase.
// During mixed gc we do not use them for young gen sizing.
if (this_pause_was_young_only) {
- _analytics->report_pending_cards((double) _pending_cards);
+ _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
_analytics->report_rs_length((double) _max_rs_length);
}
}
@@ -798,6 +859,7 @@
scan_logged_cards_time_goal_ms -= scan_hcc_time_ms;
}
+ _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
double const logged_cards_time = logged_cards_processing_time();
log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
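
record_concurrent_refinement_data() above reduces raw deltas to per-millisecond rates. As a worked example under assumed numbers: 45,000 cards refined by the concurrent threads over 30 ms of refinement time gives a refine rate of 1,500 cards/ms, and 200,000 cards logged over a 400 ms mutator interval gives a logged-cards rate of 500 cards/ms. The same arithmetic as a standalone check:

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  // Concurrent refinement delta since the previous GC (assumed numbers).
  std::size_t cr_cards = 45000;
  double cr_time_ms = 30.0;
  double refine_rate = cr_cards / cr_time_ms;  // cards per ms
  assert(refine_rate == 1500.0);

  // Cards logged over the mutator interval: pending at GC start, plus cards
  // already consumed by concurrent and mutator refinement, minus what was
  // still pending when the previous GC ended.
  std::size_t pending_at_start = 120000;
  std::size_t pending_at_prev_end = 5000;
  std::size_t mut_cards = 40000;
  std::size_t logged =
      pending_at_start + cr_cards + mut_cards - pending_at_prev_end;
  double logging_time_ms = 400.0;
  double logged_rate = logged / logging_time_ms;  // 200000 / 400 = 500

  std::printf("refine rate: %.0f cards/ms, logged rate: %.0f cards/ms\n",
              refine_rate, logged_rate);
  return 0;
}
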
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -100,7 +100,11 @@
size_t _rs_length_prediction;
- size_t _pending_cards;
+ size_t _pending_cards_at_gc_start;
+ size_t _pending_cards_at_prev_gc_end;
+ size_t _total_mutator_refined_cards;
+ size_t _total_concurrent_refined_cards;
+ Tickspan _total_concurrent_refinement_time;
// The amount of allocated bytes in old gen during the last mutator and the following
// young GC phase.
@@ -244,7 +248,15 @@
uint base_free_regions, double target_pause_time_ms) const;
public:
- size_t pending_cards() const { return _pending_cards; }
+ size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }
+
+ size_t total_concurrent_refined_cards() const {
+ return _total_concurrent_refined_cards;
+ }
+
+ size_t total_mutator_refined_cards() const {
+ return _total_mutator_refined_cards;
+ }
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
@@ -283,6 +295,9 @@
void record_pause(PauseKind kind, double start, double end);
// Indicate that we aborted marking before doing any mixed GCs.
void abort_time_to_mixed_tracking();
+
+ void record_concurrent_refinement_data(bool is_full_collection);
+
public:
G1Policy(STWGCTimer* gc_timer);
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -489,7 +489,6 @@
_scan_state(new G1RemSetScanState()),
_prev_period_summary(),
_g1h(g1h),
- _num_conc_refined_cards(0),
_ct(ct),
_g1p(_g1h->policy()),
_hot_card_cache(hot_card_cache) {
@@ -1377,7 +1376,6 @@
G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
- _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
return;
}
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -61,7 +61,6 @@
G1RemSetSummary _prev_period_summary;
G1CollectedHeap* _g1h;
- size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
G1CardTable* _ct;
G1Policy* _g1p;
@@ -125,8 +124,6 @@
// Print accumulated summary info from the last time called.
void print_periodic_summary_info(const char* header, uint period_count);
- size_t num_conc_refined_cards() const { return _num_conc_refined_cards; }
-
// Rebuilds the remembered set by scanning from bottom to TARS for all regions
// using the given work gang.
void rebuild_rem_set(G1ConcurrentMark* cm, WorkGang* workers, uint worker_id_offset);
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -27,6 +27,7 @@
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
+#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -53,18 +54,17 @@
};
void G1RemSetSummary::update() {
- _num_conc_refined_cards = _rem_set->num_conc_refined_cards();
- G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
- _num_processed_buf_mutator = dcqs.processed_buffers_mut();
- _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+ const G1Policy* policy = g1h->policy();
+ _total_mutator_refined_cards = policy->total_mutator_refined_cards();
+ _total_concurrent_refined_cards = policy->total_concurrent_refined_cards();
_num_coarsenings = HeapRegionRemSet::n_coarsenings();
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1ConcurrentRefine* cg1r = g1h->concurrent_refine();
if (_rs_threads_vtimes != NULL) {
GetRSThreadVTimeClosure p(this);
- cg1r->threads_do(&p);
+ g1h->concurrent_refine()->threads_do(&p);
}
set_sampling_thread_vtime(g1h->sampling_thread()->vtime_accum());
}
@@ -83,9 +83,8 @@
G1RemSetSummary::G1RemSetSummary() :
_rem_set(NULL),
- _num_conc_refined_cards(0),
- _num_processed_buf_mutator(0),
- _num_processed_buf_rs_threads(0),
+ _total_mutator_refined_cards(0),
+ _total_concurrent_refined_cards(0),
_num_coarsenings(0),
_num_vtimes(G1ConcurrentRefine::max_num_threads()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
@@ -96,9 +95,8 @@
G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) :
_rem_set(rem_set),
- _num_conc_refined_cards(0),
- _num_processed_buf_mutator(0),
- _num_processed_buf_rs_threads(0),
+ _total_mutator_refined_cards(0),
+ _total_concurrent_refined_cards(0),
_num_coarsenings(0),
_num_vtimes(G1ConcurrentRefine::max_num_threads()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
@@ -114,12 +112,10 @@
assert(other != NULL, "just checking");
assert(_num_vtimes == other->_num_vtimes, "just checking");
- _num_conc_refined_cards = other->num_conc_refined_cards();
+ _total_mutator_refined_cards = other->total_mutator_refined_cards();
+ _total_concurrent_refined_cards = other->total_concurrent_refined_cards();
- _num_processed_buf_mutator = other->num_processed_buf_mutator();
- _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads();
-
- _num_coarsenings = other->_num_coarsenings;
+ _num_coarsenings = other->num_coarsenings();
memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes);
@@ -130,10 +126,8 @@
assert(other != NULL, "just checking");
assert(_num_vtimes == other->_num_vtimes, "just checking");
- _num_conc_refined_cards = other->num_conc_refined_cards() - _num_conc_refined_cards;
-
- _num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator;
- _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads;
+ _total_mutator_refined_cards = other->total_mutator_refined_cards() - _total_mutator_refined_cards;
+ _total_concurrent_refined_cards = other->total_concurrent_refined_cards() - _total_concurrent_refined_cards;
_num_coarsenings = other->num_coarsenings() - _num_coarsenings;
@@ -356,16 +350,15 @@
void G1RemSetSummary::print_on(outputStream* out) {
out->print_cr(" Recent concurrent refinement statistics");
- out->print_cr(" Processed " SIZE_FORMAT " cards concurrently", num_conc_refined_cards());
- out->print_cr(" Of " SIZE_FORMAT " completed buffers:", num_processed_buf_total());
- out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by concurrent RS threads.",
- num_processed_buf_total(),
- percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
+ out->print_cr(" Of " SIZE_FORMAT " refined cards:", total_refined_cards());
+ out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by concurrent refinement threads.",
+ total_concurrent_refined_cards(),
+ percent_of(total_concurrent_refined_cards(), total_refined_cards()));
out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by mutator threads.",
- num_processed_buf_mutator(),
- percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
+ total_mutator_refined_cards(),
+ percent_of(total_mutator_refined_cards(), total_refined_cards()));
out->print_cr(" Did " SIZE_FORMAT " coarsenings.", num_coarsenings());
- out->print_cr(" Concurrent RS threads times (s)");
+ out->print_cr(" Concurrent refinement threads times (s)");
out->print(" ");
for (uint i = 0; i < _num_vtimes; i++) {
out->print(" %5.2f", rs_thread_vtime(i));
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -38,9 +38,8 @@
G1RemSet* _rem_set;
- size_t _num_conc_refined_cards;
- size_t _num_processed_buf_mutator;
- size_t _num_processed_buf_rs_threads;
+ size_t _total_mutator_refined_cards;
+ size_t _total_concurrent_refined_cards;
size_t _num_coarsenings;
@@ -76,20 +75,16 @@
return _sampling_thread_vtime;
}
- size_t num_conc_refined_cards() const {
- return _num_conc_refined_cards;
+ size_t total_mutator_refined_cards() const {
+ return _total_mutator_refined_cards;
}
- size_t num_processed_buf_mutator() const {
- return _num_processed_buf_mutator;
+ size_t total_concurrent_refined_cards() const {
+ return _total_concurrent_refined_cards;
}
- size_t num_processed_buf_rs_threads() const {
- return _num_processed_buf_rs_threads;
- }
-
- size_t num_processed_buf_total() const {
- return num_processed_buf_mutator() + num_processed_buf_rs_threads();
+ size_t total_refined_cards() const {
+ return total_mutator_refined_cards() + total_concurrent_refined_cards();
}
size_t num_coarsenings() const {
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -71,7 +71,7 @@
_g1h(g1h),
_process_strong_tasks(G1RP_PS_NumElements),
_srs(n_workers),
- _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Monitor::_safepoint_check_never),
+ _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Mutex::_safepoint_check_never),
_n_workers_discovered_strong_classes(0) {}
void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) {
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -428,7 +428,7 @@
HeapRegion* hr)
: _bot(bot),
_code_roots(),
- _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
+ _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Mutex::_safepoint_check_never),
_other_regions(&_m),
_hr(hr),
_state(Untracked)
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -264,7 +264,7 @@
virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
// Support for GC barriers emitted during parsing
- virtual bool has_load_barriers() const { return false; }
+ virtual bool has_load_barrier_nodes() const { return false; }
virtual bool is_gc_barrier_node(Node* node) const { return false; }
virtual Node* step_over_gc_barrier(Node* c) const { return c; }
virtual Node* step_over_gc_barrier_ctrl(Node* c) const { return c; }
@@ -287,13 +287,9 @@
virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
virtual bool has_special_unique_user(const Node* node) const { return false; }
- virtual bool needs_anti_dependence_check(const Node* node) const { return true; }
-
- virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const { }
enum CompilePhase {
BeforeOptimize,
- BeforeLateInsertion,
BeforeMacroExpand,
BeforeCodeGen
};
@@ -320,6 +316,10 @@
virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; }
virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; }
virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; }
+
+ virtual void late_barrier_analysis() const { }
+ virtual int estimate_stub_size() const { return 0; }
+ virtual void emit_stubs(CodeBuffer& cb) const { }
};
#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -38,6 +38,7 @@
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
+#include "opto/runtime.hpp"
ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
@@ -461,11 +462,9 @@
}
const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
- const Type **fields = TypeTuple::fields(3);
- fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // src
- fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // dst
- fields[TypeFunc::Parms+2] = TypeInt::INT; // length
- const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);
+ const Type **fields = TypeTuple::fields(1);
+ fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
+ const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
// create result type (range)
fields = TypeTuple::fields(0);
@@ -796,8 +795,6 @@
return false;
}
-#define XTOP LP64_ONLY(COMMA phase->top())
-
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
Node* ctrl = ac->in(TypeFunc::Control);
Node* mem = ac->in(TypeFunc::Memory);
@@ -807,14 +804,66 @@
Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
Node* length = ac->in(ArrayCopyNode::Length);
assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
+ assert (src->is_AddP(), "for clone the src should be the interior ptr");
+ assert (dest->is_AddP(), "for clone the dst should be the interior ptr");
+
if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
- Node* call = phase->make_leaf_call(ctrl, mem,
+ // Check if the heap has forwarded objects. If it does, we need to call into the special
+ // routine that would fix up source references before we can continue.
+
+ enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
+ Node* region = new RegionNode(PATH_LIMIT);
+ Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+ Node* thread = phase->transform_later(new ThreadLocalNode());
+ Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));
+
+ uint gc_state_idx = Compile::AliasIdxRaw;
+ const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
+ debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
+
+ Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
+ Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));
+ Node* stable_cmp = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
+ Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));
+
+ IfNode* stable_iff = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
+ Node* stable_ctrl = phase->transform_later(new IfFalseNode(stable_iff));
+ Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));
+
+ // Heap is stable, no need to do anything additional
+ region->init_req(_heap_stable, stable_ctrl);
+ mem_phi->init_req(_heap_stable, mem);
+
+ // Heap is unstable, call into clone barrier stub
+ Node* call = phase->make_leaf_call(unstable_ctrl, mem,
ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
"shenandoah_clone",
TypeRawPtr::BOTTOM,
- src, dest, length);
+ src->in(AddPNode::Base));
call = phase->transform_later(call);
+
+ ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
+ mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
+ region->init_req(_heap_unstable, ctrl);
+ mem_phi->init_req(_heap_unstable, mem);
+
+ // Wire up the actual arraycopy stub now
+ ctrl = phase->transform_later(region);
+ mem = phase->transform_later(mem_phi);
+
+ const char* name = "arraycopy";
+ call = phase->make_leaf_call(ctrl, mem,
+ OptoRuntime::fast_arraycopy_Type(),
+ phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
+ name, TypeRawPtr::BOTTOM,
+ src, dest, length
+ LP64_ONLY(COMMA phase->top()));
+ call = phase->transform_later(call);
+
+ // Hook up the whole thing into the graph
phase->igvn().replace_node(ac, call);
} else {
BarrierSetC2::clone_at_expansion(phase, ac);
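
The IR built in clone_at_expansion() above encodes a fast-path test of the per-thread gc-state byte so the clone barrier stub is only called while the heap has forwarded objects; clone_barrier_runtime(), added later in this change, performs the same test in C++. A hedged standalone sketch of that control flow, with a plain flag byte standing in for ShenandoahThreadLocalData::gc_state():

#include <cstdio>

// Assumed stand-in for the HAS_FORWARDED bit of Shenandoah's per-thread
// gc-state byte; the real constant lives in ShenandoahHeap.
const unsigned char HAS_FORWARDED = 1;

// Placeholder for the clone barrier stub
// (ShenandoahRuntime::shenandoah_clone_barrier).
static void fix_up_clone_source(const void* src) {
  (void)src;
  std::puts("clone barrier stub called");
}

static void clone_with_barrier(const void* src, unsigned char gc_state) {
  // Fast path: when no forwarded objects exist, skip the barrier and go
  // straight to the bulk copy, which is what the stable branch of the
  // RegionNode in the expansion above does.
  if ((gc_state & HAS_FORWARDED) != 0) {
    fix_up_clone_source(src);
  }
  // ... the ordinary arraycopy of the object payload would follow here.
}

int main() {
  int object = 42;
  clone_with_barrier(&object, 0);              // stable heap: no stub call
  clone_with_barrier(&object, HAS_FORWARDED);  // unstable heap: stub called
  return 0;
}
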
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -103,7 +103,7 @@
static const TypeFunc* write_ref_field_pre_entry_Type();
static const TypeFunc* shenandoah_clone_barrier_Type();
static const TypeFunc* shenandoah_load_reference_barrier_Type();
- virtual bool has_load_barriers() const { return true; }
+ virtual bool has_load_barrier_nodes() const { return true; }
// This is the entry-point for the backend to perform accesses through the Access API.
virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const;
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1272,6 +1272,38 @@
}
if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
+ if (call->entry_point() == OptoRuntime::rethrow_stub()) {
+ // The rethrow call may have too many projections to be
+ // properly handled here. Given there's no reason for a
+ // barrier to depend on the call, move it above the call
+ stack.push(lrb, 0);
+ do {
+ Node* n = stack.node();
+ uint idx = stack.index();
+ if (idx < n->req()) {
+ Node* in = n->in(idx);
+ stack.set_index(idx+1);
+ if (in != NULL) {
+ if (phase->has_ctrl(in)) {
+ if (phase->is_dominator(call, phase->get_ctrl(in))) {
+#ifdef ASSERT
+ for (uint i = 0; i < stack.size(); i++) {
+ assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
+ }
+#endif
+ stack.push(in, 0);
+ }
+ } else {
+ assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
+ }
+ }
+ } else {
+ phase->set_ctrl(n, call->in(0));
+ stack.pop();
+ }
+ } while(stack.size() > 0);
+ continue;
+ }
CallProjections projs;
call->extract_projections(&projs, false, false);
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -66,9 +66,12 @@
size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);
- log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
- SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
- free_target / M, actual_free / M, max_cset / M, min_garbage / M);
+ log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: "
+ SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target),
+ byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+ byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset),
+ byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage));
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);
@@ -119,8 +122,9 @@
// anything else.
size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
if (available < min_threshold) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
- available / M, min_threshold / M);
+ log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
return true;
}
@@ -129,8 +133,10 @@
if (_gc_times_learned < max_learn) {
size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
if (available < init_threshold) {
- log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
- _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
+ log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
+ _gc_times_learned + 1, max_learn,
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold));
return true;
}
}
@@ -154,10 +160,15 @@
double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
if (average_gc > allocation_headroom / allocation_rate) {
- log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
- average_gc * 1000, allocation_rate / M, allocation_headroom / M);
- log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
- available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
+ log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)",
+ average_gc * 1000,
+ byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate),
+ byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+ log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom),
+ byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties),
+ byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
return true;
}
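
The allocation-rate trigger above asks whether the average GC time exceeds the time the mutator needs to exhaust the remaining headroom. As a worked example under assumed numbers: with 512 MB of headroom and an allocation rate of 400 MB/s, the headroom lasts 1.28 s, so an average GC time of 1.5 s would start a cycle. The same check as a standalone sketch:

#include <cstdio>

int main() {
  // Assumed numbers: headroom in bytes, allocation rate in bytes per second.
  double allocation_headroom = 512.0 * 1024 * 1024;
  double allocation_rate     = 400.0 * 1024 * 1024;
  double average_gc_sec      = 1.5;

  double time_to_deplete_sec = allocation_headroom / allocation_rate;  // 1.28 s
  bool should_start_gc = average_gc_sec > time_to_deplete_sec;

  std::printf("headroom lasts %.2f s, average GC %.2f s -> %s\n",
              time_to_deplete_sec, average_gc_sec,
              should_start_gc ? "start GC" : "keep waiting");
  return 0;
}
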
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -59,21 +59,24 @@
size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
if (available < min_threshold) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
- available / M, min_threshold / M);
+ log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
return true;
}
if (available < threshold_bytes_allocated) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is lower than allocated recently (" SIZE_FORMAT "M)",
- available / M, threshold_bytes_allocated / M);
+ log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is lower than allocated recently (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated));
return true;
}
size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
if (bytes_allocated > threshold_bytes_allocated) {
- log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "M) is larger than allocation threshold (" SIZE_FORMAT "M)",
- bytes_allocated / M, threshold_bytes_allocated / M);
+ log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated),
+ byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated));
return true;
}
@@ -86,8 +89,9 @@
// Do not select too large CSet that would overflow the available free space
size_t max_cset = actual_free * 3 / 4;
- log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
- actual_free / M, max_cset / M);
+ log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+ byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset));
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -60,8 +60,9 @@
size_t available = MAX2(capacity / 100 * ShenandoahEvacReserve, actual_free);
size_t max_cset = (size_t)(available / ShenandoahEvacWaste);
- log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
- actual_free / M, max_cset / M);
+ log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+ byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset));
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -57,8 +57,9 @@
size_t threshold_available = capacity / 100 * ShenandoahFreeThreshold;
if (available < threshold_available) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below free threshold (" SIZE_FORMAT "M)",
- available / M, threshold_available / M);
+ log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below free threshold (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available));
return true;
}
return ShenandoahHeuristics::should_start_gc();
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -100,9 +100,12 @@
size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);
- log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
- SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
- free_target / M, actual_free / M, max_cset / M, min_garbage / M);
+ log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: "
+ SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target),
+ byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free),
+ byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset),
+ byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage));
// Better select garbage-first regions, and then older ones
QuickSort::sort<RegionData>(data, (int) cnt, compare_by_garbage_then_alloc_seq_ascending, false);
@@ -190,8 +193,9 @@
// anything else.
size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
if (available < min_threshold) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
- available / M, min_threshold / M);
+ log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
return true;
}
@@ -200,8 +204,10 @@
if (_gc_times_learned < max_learn) {
size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
if (available < init_threshold) {
- log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
- _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
+ log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
+ _gc_times_learned + 1, max_learn,
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold));
return true;
}
}
@@ -223,10 +229,15 @@
double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
if (average_gc > allocation_headroom / allocation_rate) {
- log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
- average_gc * 1000, allocation_rate / M, allocation_headroom / M);
- log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
- available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
+ log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)",
+ average_gc * 1000,
+ byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate),
+ byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
+ log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+ byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom),
+ byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties),
+ byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
return true;
} else if (ShenandoahHeuristics::should_start_gc()) {
return true;
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -69,7 +69,8 @@
// enough, but we also do not want to steal too much CPU from the concurrently running
// application. Using 1/4 of available threads for concurrent GC seems a good
// compromise here.
- if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+ bool ergo_conc = FLAG_IS_DEFAULT(ConcGCThreads);
+ if (ergo_conc) {
FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::processor_count() / 4));
}
@@ -82,7 +83,8 @@
// that will overwhelm the OS scheduler. Using 1/2 of available threads seems to be a fair
// compromise here. Due to implementation constraints, it should not be lower than
// the number of concurrent threads.
- if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+ bool ergo_parallel = FLAG_IS_DEFAULT(ParallelGCThreads);
+ if (ergo_parallel) {
FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::processor_count() / 2));
}
@@ -90,9 +92,21 @@
vm_exit_during_initialization("Shenandoah expects ParallelGCThreads > 0, check -XX:ParallelGCThreads=#");
}
+ // Make sure ergonomic decisions do not break the thread count invariants.
+ // This may happen when the user overrides one of the flags, but not the other.
+ // When that happens, we want to adjust the setting that was set ergonomically.
if (ParallelGCThreads < ConcGCThreads) {
- warning("Shenandoah expects ConcGCThreads <= ParallelGCThreads, adjusting ParallelGCThreads automatically");
- FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads);
+ if (ergo_conc && !ergo_parallel) {
+ FLAG_SET_DEFAULT(ConcGCThreads, ParallelGCThreads);
+ } else if (!ergo_conc && ergo_parallel) {
+ FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads);
+ } else if (ergo_conc && ergo_parallel) {
+ // Should not happen, check the ergonomic computation above. Fail with relevant error.
+ vm_exit_during_initialization("Shenandoah thread count ergonomic error");
+ } else {
+ // User settings error, report and ask user to rectify.
+ vm_exit_during_initialization("Shenandoah expects ConcGCThreads <= ParallelGCThreads, check -XX:ParallelGCThreads, -XX:ConcGCThreads");
+ }
}
if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) {
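[Editor's note] The new logic above keeps the invariant ConcGCThreads <= ParallelGCThreads by adjusting whichever value was chosen ergonomically, and only refuses to start when both were pinned by the user. A standalone sketch of that decision table (plain ints and hypothetical names instead of HotSpot flag machinery):

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <stdexcept>

struct GcThreads { int conc; int parallel; };

GcThreads pick_threads(int cpus, bool user_conc, int user_conc_val,
                       bool user_parallel, int user_parallel_val) {
  // Ergonomic defaults mirror the 1/4 and 1/2 ratios used above.
  int conc     = user_conc     ? user_conc_val     : std::max(1, cpus / 4);
  int parallel = user_parallel ? user_parallel_val : std::max(1, cpus / 2);
  if (parallel < conc) {
    if (!user_conc && user_parallel) {
      conc = parallel;                 // ergonomic side yields to the user-pinned side
    } else if (user_conc && !user_parallel) {
      parallel = conc;
    } else if (!user_conc && !user_parallel) {
      assert(false && "ergonomics should already keep conc <= parallel");
    } else {
      throw std::invalid_argument("ConcGCThreads must be <= ParallelGCThreads");
    }
  }
  return {conc, parallel};
}

int main() {
  GcThreads t = pick_threads(/*cpus*/ 8, /*user conc*/ true, 6, /*user parallel*/ false, 0);
  printf("conc=%d parallel=%d\n", t.conc, t.parallel); // parallel is raised to 6
  return 0;
}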
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@@ -283,3 +284,10 @@
return load_reference_barrier_not_null(obj);
}
+
+void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
+ if (_heap->has_forwarded_objects()) {
+ clone_barrier(src);
+ }
+}
+
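[Editor's note] clone_barrier_runtime() adds a cheap has_forwarded_objects() guard in front of the reference-fixup walk, so the out-of-line entry only pays for the heavy work when the heap is actually unstable. A tiny standalone sketch of that check-then-dispatch shape (hypothetical types, not the HotSpot ones):

#include <cstdio>

struct Heap { bool has_forwarded_objects; };

// Stand-in for the per-field fixup done by clone_barrier().
static void fix_references(const char* obj) { printf("fixing refs in %s\n", obj); }

// Stand-in for clone_barrier_runtime(): cheap guard first, heavy work only if needed.
static void clone_barrier_runtime(const Heap& heap, const char* obj) {
  if (heap.has_forwarded_objects) {
    fix_references(obj);
  }
}

int main() {
  clone_barrier_runtime(Heap{false}, "a"); // stable heap: no work
  clone_barrier_runtime(Heap{true},  "b"); // unstable heap: fixup runs
  return 0;
}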
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -71,6 +71,7 @@
inline void arraycopy_update(oop* src, size_t count);
inline void arraycopy_update(narrowOop* src, size_t count);
inline void clone_barrier(oop src);
+ void clone_barrier_runtime(oop src);
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -186,7 +186,7 @@
template <DecoratorSet decorators, typename BarrierSetT>
void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
if (ShenandoahCloneBarrier) {
- ShenandoahBarrierSet::barrier_set()->clone_barrier(src);
+ ShenandoahBarrierSet::barrier_set()->clone_barrier_runtime(src);
}
Raw::clone(src, dst, size);
}
@@ -273,67 +273,4 @@
arraycopy_update_impl(src, count);
}
-template <bool EVAC, bool ENQUEUE>
-class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
-private:
- ShenandoahHeap* const _heap;
- ShenandoahBarrierSet* const _bs;
- const ShenandoahCollectionSet* const _cset;
- Thread* const _thread;
-
- template <class T>
- inline void do_oop_work(T* p) {
- T o = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(o)) {
- oop obj = CompressedOops::decode_not_null(o);
- if (_cset->is_in((HeapWord *)obj)) {
- oop fwd = _bs->resolve_forwarded_not_null(obj);
- if (EVAC && obj == fwd) {
- fwd = _heap->evacuate_object(obj, _thread);
- }
- if (ENQUEUE) {
- _bs->enqueue(fwd);
- }
- assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded");
- ShenandoahHeap::cas_oop(fwd, p, o);
- }
-
- }
- }
-public:
- ShenandoahUpdateRefsForOopClosure() :
- _heap(ShenandoahHeap::heap()),
- _bs(ShenandoahBarrierSet::barrier_set()),
- _cset(_heap->collection_set()),
- _thread(Thread::current()) {
- }
-
- virtual void do_oop(oop* p) { do_oop_work(p); }
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-};
-
-void ShenandoahBarrierSet::clone_barrier(oop obj) {
- assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
- if (!_heap->has_forwarded_objects()) return;
-
- // This is called for cloning an object (see jvm.cpp) after the clone
- // has been made. We are not interested in any 'previous value' because
- // it would be NULL in any case. But we *are* interested in any oop*
- // that potentially need to be updated.
-
- shenandoah_assert_correct(NULL, obj);
- if (_heap->is_evacuation_in_progress()) {
- ShenandoahEvacOOMScope evac_scope;
- ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ false> cl;
- obj->oop_iterate(&cl);
- } else if (_heap->is_concurrent_traversal_in_progress()) {
- ShenandoahEvacOOMScope evac_scope;
- ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ true> cl;
- obj->oop_iterate(&cl);
- } else {
- ShenandoahUpdateRefsForOopClosure</* evac = */ false, /* enqueue */ false> cl;
- obj->oop_iterate(&cl);
- }
-}
-
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "memory/iterator.hpp"
+#include "oops/access.hpp"
+#include "oops/compressedOops.hpp"
+
+template <bool EVAC, bool ENQUEUE>
+class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
+private:
+ ShenandoahHeap* const _heap;
+ ShenandoahBarrierSet* const _bs;
+ const ShenandoahCollectionSet* const _cset;
+ Thread* const _thread;
+
+ template <class T>
+ inline void do_oop_work(T* p) {
+ T o = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(o)) {
+ oop obj = CompressedOops::decode_not_null(o);
+ if (_cset->is_in((HeapWord *)obj)) {
+ oop fwd = _bs->resolve_forwarded_not_null(obj);
+ if (EVAC && obj == fwd) {
+ fwd = _heap->evacuate_object(obj, _thread);
+ }
+ if (ENQUEUE) {
+ _bs->enqueue(fwd);
+ }
+ assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded");
+ ShenandoahHeap::cas_oop(fwd, p, o);
+ }
+
+ }
+ }
+public:
+ ShenandoahUpdateRefsForOopClosure() :
+ _heap(ShenandoahHeap::heap()),
+ _bs(ShenandoahBarrierSet::barrier_set()),
+ _cset(_heap->collection_set()),
+ _thread(Thread::current()) {
+ }
+
+ virtual void do_oop(oop* p) { do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+void ShenandoahBarrierSet::clone_barrier(oop obj) {
+ assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
+ assert(_heap->has_forwarded_objects(), "only when heap is unstable");
+
+ // This is called for cloning an object (see jvm.cpp) after the clone
+ // has been made. We are not interested in any 'previous value' because
+ // it would be NULL in any case. But we *are* interested in any oop*
+ // that potentially need to be updated.
+
+ shenandoah_assert_correct(NULL, obj);
+ if (_heap->is_evacuation_in_progress()) {
+ ShenandoahEvacOOMScope evac_scope;
+ ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ false> cl;
+ obj->oop_iterate(&cl);
+ } else if (_heap->is_concurrent_traversal_in_progress()) {
+ ShenandoahEvacOOMScope evac_scope;
+ ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ true> cl;
+ obj->oop_iterate(&cl);
+ } else {
+ ShenandoahUpdateRefsForOopClosure</* evac = */ false, /* enqueue */ false> cl;
+ obj->oop_iterate(&cl);
+ }
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP
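[Editor's note] The closure's <EVAC, ENQUEUE> template parameters bake the phase-specific behavior (evacuate and/or enqueue) into the three separate instantiations used in clone_barrier(), so the hot per-reference loop carries no runtime phase checks. A standalone sketch of that pattern with stand-in types:

#include <cstdio>
#include <vector>

template <bool EVAC, bool ENQUEUE>
struct UpdateRefs {
  // The two booleans are compile-time constants, so dead branches vanish per instantiation.
  void operator()(int& ref) const {
    if (EVAC)    { ref += 1000; }            // "evacuate": pretend-move the object
    if (ENQUEUE) { queue.push_back(ref); }   // remember it for later processing
  }
  mutable std::vector<int> queue;
};

int main() {
  int refs[] = {1, 2, 3};
  UpdateRefs</* evac */ true, /* enqueue */ false> evac_only;
  for (int& r : refs) evac_only(r);
  printf("%d %d %d\n", refs[0], refs[1], refs[2]); // 1001 1002 1003
  return 0;
}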
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -491,8 +491,12 @@
size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
size_t free = capacity() - used();
- ls.print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
- total_free / M, mutator_count(), max / K, max_humongous / K);
+ ls.print("Free: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s, Max humongous: " SIZE_FORMAT "%s, ",
+ byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
+ mutator_count(),
+ byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
+ byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
+ );
size_t frag_ext;
if (free > 0) {
@@ -525,8 +529,10 @@
}
}
- ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
- total_free / M, collector_count(), max / K);
+ ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
+ collector_count(),
+ byte_size_in_proper_unit(max), proper_unit_for_byte_size(max));
}
}
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -522,10 +522,14 @@
void ShenandoahHeap::print_on(outputStream* st) const {
st->print_cr("Shenandoah Heap");
- st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
- max_capacity() / K, committed() / K, used() / K);
- st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
- num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
+ st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
+ byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
+ byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
+ byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
+ st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
+ num_regions(),
+ byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
+ proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
st->print("Status: ");
if (has_forwarded_objects()) st->print("has forwarded objects, ");
@@ -959,7 +963,8 @@
ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
ShenandoahHeapRegion* r;
while ((r =_cs->claim_next()) != NULL) {
- assert(r->has_live(), "all-garbage regions are reclaimed early");
+ assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->region_number());
+ assert(r->is_conc_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number());
_sh->marked_object_iterate(r, &cl);
if (ShenandoahPacing) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -527,29 +527,35 @@
size_t region_size;
if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
- err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
- "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
- max_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
+ err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
+ "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
+ MIN_NUM_REGIONS,
+ byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
}
if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
- err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
- ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
+ err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
+ byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
}
if (ShenandoahMinRegionSize < MinTLABSize) {
- err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
- ShenandoahMinRegionSize/K, MinTLABSize/K);
+ err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
+ byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
}
if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
- err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
- ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
+ err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
+ byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
}
if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
- err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
- ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
+ err_msg message("Minimum (" SIZE_FORMAT "%s) should be larger than maximum (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
+ byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
}
@@ -563,19 +569,23 @@
} else {
if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) {
- err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
- "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
- max_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
+ err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
+ "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
+ MIN_NUM_REGIONS,
+ byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
}
if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
- err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
- ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
+ err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize),
+ byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
}
if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
- err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
- ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
+ err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
+ byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize),
+ byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
}
region_size = ShenandoahHeapRegionSize;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -198,7 +198,8 @@
// Macro-properties:
bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; }
- bool is_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
+ bool is_conc_move_allowed() const { return is_regular() || _state == _cset; }
+ bool is_stw_move_allowed() const { return is_conc_move_allowed() || (ShenandoahHumongousMoves && _state == _humongous_start); }
RegionState state() const { return _state; }
int state_ordinal() const { return region_state_to_ordinal(_state); }
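[Editor's note] The single is_move_allowed() predicate is split so concurrent evacuation and the stop-the-world full GC can answer differently: only the STW path may relocate humongous-start regions, and only when ShenandoahHumongousMoves is on. A standalone sketch of the two predicates with stand-in state values:

#include <cstdio>

enum State { Empty, Regular, Cset, HumongousStart, Pinned };

static bool conc_move_allowed(State s) {
  return s == Regular || s == Cset;
}

static bool stw_move_allowed(State s, bool humongous_moves) {
  return conc_move_allowed(s) || (humongous_moves && s == HumongousStart);
}

int main() {
  // A humongous-start region: not movable concurrently, movable in STW full GC.
  printf("conc=%d stw=%d\n",
         conc_move_allowed(HumongousStart),
         stw_move_allowed(HumongousStart, /* ShenandoahHumongousMoves */ true));
  return 0;
}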
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -186,8 +186,9 @@
// given the amount of immediately reclaimable garbage. If we do, figure out the collection set.
assert (immediate_garbage <= total_garbage,
- "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "M vs " SIZE_FORMAT "M",
- immediate_garbage / M, total_garbage / M);
+ "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage),
+ byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage);
@@ -196,12 +197,16 @@
collection_set->update_region_status();
size_t cset_percent = total_garbage == 0 ? 0 : (collection_set->garbage() * 100 / total_garbage);
- log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
- collection_set->garbage() / M, cset_percent, collection_set->live_data() / M, collection_set->count());
+ log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
+ byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()),
+ cset_percent,
+ byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
+ collection_set->count());
}
- log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
- immediate_garbage / M, immediate_percent, immediate_regions);
+ log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
+ byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage),
+ immediate_percent, immediate_regions);
}
void ShenandoahHeuristics::record_gc_start() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -327,14 +327,25 @@
ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
ShenandoahHeapRegion* from_region = _heap_regions.next();
- while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
+ // Look for next candidate for this slice:
+ while (from_region != NULL) {
+ // Empty region: get it into the slice to defragment the slice itself.
+ // We could have skipped this without violating correctness, but we really
+ // want to compact all live regions to the start of the heap, which sometimes
+ // means moving them into the fully empty regions.
+ if (from_region->is_empty()) break;
+
+ // Can move the region, and this is not the humongous region. Humongous
+ // moves are special cased here, because their moves are handled separately.
+ if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break;
+
from_region = _heap_regions.next();
}
if (from_region != NULL) {
assert(slice != NULL, "sanity");
assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
- assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
+ assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact");
slice->add_region(from_region);
}
@@ -408,7 +419,7 @@
continue;
}
- if (r->is_humongous_start() && r->is_move_allowed()) {
+ if (r->is_humongous_start() && r->is_stw_move_allowed()) {
// From-region candidate: movable humongous region
oop old_obj = oop(r->bottom());
size_t words_size = old_obj->size();
@@ -750,7 +761,7 @@
size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
size_t new_end = new_start + num_regions - 1;
assert(old_start != new_start, "must be real move");
- assert (r->is_move_allowed(), "should be movable");
+ assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number());
Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
heap->get_region(new_start)->bottom(),
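[Editor's note] The rewritten next_from_region() loop accepts a region into a compaction slice either because it is completely empty (so live data can slide into it) or because it is a movable, non-humongous region. A standalone sketch of that filter with a hypothetical Region struct:

#include <cstdio>
#include <vector>

struct Region { bool empty; bool stw_movable; bool humongous; };

static bool is_from_region_candidate(const Region& r) {
  if (r.empty) return true;                 // defragment: pull the empty region into the slice
  return r.stw_movable && !r.humongous;     // humongous moves are handled elsewhere
}

int main() {
  std::vector<Region> regions = {
    { true,  false, false },   // empty           -> candidate
    { false, true,  true  },   // humongous       -> skipped here
    { false, true,  false },   // regular movable -> candidate
  };
  for (const Region& r : regions) printf("%d ", is_from_region_candidate(r));
  printf("\n");                              // prints: 1 0 1
  return 0;
}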
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -70,9 +70,12 @@
restart_with(non_taxable, tax);
- log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
- "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
- live / M, free / M, non_taxable / M, tax);
+ log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
+ "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+ byte_size_in_proper_unit(live), proper_unit_for_byte_size(live),
+ byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
+ byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
+ tax);
}
void ShenandoahPacer::setup_for_evac() {
@@ -91,9 +94,12 @@
restart_with(non_taxable, tax);
- log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
- "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
- used / M, free / M, non_taxable / M, tax);
+ log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
+ "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+ byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
+ byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
+ byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
+ tax);
}
void ShenandoahPacer::setup_for_updaterefs() {
@@ -112,9 +118,12 @@
restart_with(non_taxable, tax);
- log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
- "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
- used / M, free / M, non_taxable / M, tax);
+ log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
+ "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+ byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
+ byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
+ byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
+ tax);
}
/*
@@ -136,9 +145,12 @@
restart_with(non_taxable, tax);
- log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
- "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
- live / M, free / M, non_taxable / M, tax);
+ log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
+ "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+ byte_size_in_proper_unit(live), proper_unit_for_byte_size(live),
+ byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
+ byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
+ tax);
}
/*
@@ -158,8 +170,9 @@
restart_with(initial, tax);
- log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
- initial / M, tax);
+ log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
+ byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial),
+ tax);
}
size_t ShenandoahPacer::update_and_get_progress_history() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -23,10 +23,12 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "utilities/copy.hpp"
void ShenandoahRuntime::write_ref_array_pre_oop_entry(oop* src, oop* dst, size_t length) {
ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set();
@@ -74,13 +76,10 @@
// Shenandoah clone barrier: makes sure that references point to to-space
// in cloned objects.
-JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* s, oopDesc* d, size_t length))
- oop src = oop(s);
- oop dst = oop(d);
- shenandoah_assert_correct(NULL, src);
- shenandoah_assert_correct(NULL, dst);
- ShenandoahBarrierSet::barrier_set()->clone_barrier(src);
- RawAccessBarrier<IS_NOT_NULL>::clone(src, dst, length);
+JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* src))
+ oop s = oop(src);
+ shenandoah_assert_correct(NULL, s);
+ ShenandoahBarrierSet::barrier_set()->clone_barrier(s);
JRT_END
JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_native(oopDesc * src))
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -44,7 +44,7 @@
static oopDesc* load_reference_barrier_native(oopDesc* src);
- static void shenandoah_clone_barrier(oopDesc* s, oopDesc* d, size_t length);
+ static void shenandoah_clone_barrier(oopDesc* src);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -205,8 +205,11 @@
void ShenandoahStrDedupQueue::print_statistics_impl() {
Log(gc, stringdedup) log;
log.debug(" Queue:");
- log.debug(" Total buffers: " SIZE_FORMAT " (" SIZE_FORMAT " K). " SIZE_FORMAT " buffers are on free list",
- _total_buffers, (_total_buffers * sizeof(ShenandoahQueueBuffer) / K), _num_free_buffer);
+ log.debug(" Total buffers: " SIZE_FORMAT " (" SIZE_FORMAT " %s). " SIZE_FORMAT " buffers are on free list",
+ _total_buffers,
+ byte_size_in_proper_unit(_total_buffers * sizeof(ShenandoahQueueBuffer)),
+ proper_unit_for_byte_size(_total_buffers * sizeof(ShenandoahQueueBuffer)),
+ _num_free_buffer);
}
class VerifyQueueClosure : public OopClosure {
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -366,8 +366,10 @@
// Rebuild free set
free_set->rebuild();
- log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
- collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
+ log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
+ byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()),
+ byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
+ collection_set->count());
}
void ShenandoahTraversalGC::init_traversal_collection() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -686,13 +686,17 @@
_heap->heap_region_iterate(&cl);
size_t heap_used = _heap->used();
guarantee(cl.used() == heap_used,
- "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "K, regions-used = " SIZE_FORMAT "K",
- label, heap_used/K, cl.used()/K);
+ "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s",
+ label,
+ byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used),
+ byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used()));
size_t heap_committed = _heap->committed();
guarantee(cl.committed() == heap_committed,
- "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "K, regions-committed = " SIZE_FORMAT "K",
- label, heap_committed/K, cl.committed()/K);
+ "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s",
+ label,
+ byte_size_in_exact_unit(heap_committed), proper_unit_for_byte_size(heap_committed),
+ byte_size_in_exact_unit(cl.committed()), proper_unit_for_byte_size(cl.committed()));
}
// Internal heap region checks
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -299,11 +299,11 @@
"Should internally-caused GCs invoke concurrent cycles, or go to" \
"stop-the-world (degenerated/full)?") \
\
- experimental(bool, ShenandoahHumongousMoves, true, \
+ diagnostic(bool, ShenandoahHumongousMoves, true, \
"Allow moving humongous regions. This makes GC more resistant " \
"to external fragmentation that may otherwise fail other " \
"humongous allocations, at the expense of higher GC copying " \
- "costs.") \
+ "costs. Currently affects stop-the-world (full) cycle only.") \
\
diagnostic(bool, ShenandoahOOMDuringEvacALot, false, \
"Simulate OOM during evacuation frequently.") \
@@ -314,9 +314,6 @@
diagnostic(bool, ShenandoahTerminationTrace, false, \
"Tracing task termination timings") \
\
- develop(bool, ShenandoahVerifyObjectEquals, false, \
- "Verify that == and != are not used on oops. Only in fastdebug") \
- \
diagnostic(bool, ShenandoahAlwaysPreTouch, false, \
"Pre-touch heap memory, overrides global AlwaysPreTouch") \
\
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -22,451 +22,157 @@
*/
#include "precompiled.hpp"
-#include "opto/castnode.hpp"
+#include "classfile/javaClasses.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "opto/block.hpp"
#include "opto/compile.hpp"
-#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
-#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
-#include "opto/macro.hpp"
#include "opto/memnode.hpp"
-#include "opto/movenode.hpp"
#include "opto/node.hpp"
-#include "opto/phase.hpp"
-#include "opto/phaseX.hpp"
+#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
-#include "opto/type.hpp"
-#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
-#include "gc/z/zBarrierSet.hpp"
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#include "gc/z/zThreadLocalData.hpp"
-#include "gc/z/zBarrierSetRuntime.hpp"
-ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) :
- _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}
+class ZBarrierSetC2State : public ResourceObj {
+private:
+ GrowableArray<ZLoadBarrierStubC2*>* _stubs;
+ Node_Array _live;
-int ZBarrierSetC2State::load_barrier_count() const {
- return _load_barrier_nodes->length();
-}
+public:
+ ZBarrierSetC2State(Arena* arena) :
+ _stubs(new (arena) GrowableArray<ZLoadBarrierStubC2*>(arena, 8, 0, NULL)),
+ _live(arena) {}
-void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
- assert(!_load_barrier_nodes->contains(n), " duplicate entry in expand list");
- _load_barrier_nodes->append(n);
-}
+ GrowableArray<ZLoadBarrierStubC2*>* stubs() {
+ return _stubs;
+ }
-void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
- // this function may be called twice for a node so check
- // that the node is in the array before attempting to remove it
- if (_load_barrier_nodes->contains(n)) {
- _load_barrier_nodes->remove(n);
- }
-}
+ RegMask* live(const Node* node) {
+ if (!node->is_Mach()) {
+ // Don't need liveness for non-MachNodes
+ return NULL;
+ }
-LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
- return _load_barrier_nodes->at(idx);
-}
+ const MachNode* const mach = node->as_Mach();
+ if (mach->barrier_data() != ZLoadBarrierStrong &&
+ mach->barrier_data() != ZLoadBarrierWeak) {
+ // Don't need liveness data for nodes without barriers
+ return NULL;
+ }
-void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
- return new(comp_arena) ZBarrierSetC2State(comp_arena);
-}
+ RegMask* live = (RegMask*)_live[node->_idx];
+ if (live == NULL) {
+ live = new (Compile::current()->comp_arena()->Amalloc_D(sizeof(RegMask))) RegMask();
+ _live.map(node->_idx, (Node*)live);
+ }
-ZBarrierSetC2State* ZBarrierSetC2::state() const {
+ return live;
+ }
+};
+
+static ZBarrierSetC2State* barrier_set_state() {
return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
}
-bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
- // 1. This step follows potential oop projections of a load barrier before expansion
- if (node->is_Proj()) {
- node = node->in(0);
- }
-
- // 2. This step checks for unexpanded load barriers
- if (node->is_LoadBarrier()) {
- return true;
- }
-
- // 3. This step checks for the phi corresponding to an optimized load barrier expansion
- if (node->is_Phi()) {
- PhiNode* phi = node->as_Phi();
- Node* n = phi->in(1);
- if (n != NULL && n->is_LoadBarrierSlowReg()) {
- return true;
- }
- }
-
- return false;
-}
-
-void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
- if (node->is_LoadBarrier()) {
- state()->add_load_barrier_node(node->as_LoadBarrier());
- }
-}
-
-void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
- if (node->is_LoadBarrier()) {
- state()->remove_load_barrier_node(node->as_LoadBarrier());
- }
-}
-
-void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
- // Remove useless LoadBarrier nodes
- ZBarrierSetC2State* s = state();
- for (int i = s->load_barrier_count()-1; i >= 0; i--) {
- LoadBarrierNode* n = s->load_barrier_node(i);
- if (!useful.member(n)) {
- unregister_potential_barrier_node(n);
- }
- }
-}
-
-void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
- if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
- igvn->_worklist.push(node);
- }
-}
-
-const uint NoBarrier = 0;
-const uint RequireBarrier = 1;
-const uint WeakBarrier = 2;
-const uint ExpandedBarrier = 4;
-
-static bool load_require_barrier(LoadNode* load) { return (load->barrier_data() & RequireBarrier) == RequireBarrier; }
-static bool load_has_weak_barrier(LoadNode* load) { return (load->barrier_data() & WeakBarrier) == WeakBarrier; }
-static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; }
-static void load_set_expanded_barrier(LoadNode* load) { return load->set_barrier_data(ExpandedBarrier); }
-
-static void load_set_barrier(LoadNode* load, bool weak) {
- if (weak) {
- load->set_barrier_data(RequireBarrier | WeakBarrier);
- } else {
- load->set_barrier_data(RequireBarrier);
- }
-}
-
-// == LoadBarrierNode ==
-
-LoadBarrierNode::LoadBarrierNode(Compile* C,
- Node* c,
- Node* mem,
- Node* val,
- Node* adr,
- bool weak) :
- MultiNode(Number_of_Inputs),
- _weak(weak) {
- init_req(Control, c);
- init_req(Memory, mem);
- init_req(Oop, val);
- init_req(Address, adr);
- init_req(Similar, C->top());
-
- init_class_id(Class_LoadBarrier);
- BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- bs->register_potential_barrier_node(this);
-}
-
-uint LoadBarrierNode::size_of() const {
- return sizeof(*this);
-}
-
-bool LoadBarrierNode::cmp(const Node& n) const {
- ShouldNotReachHere();
- return false;
-}
-
-const Type *LoadBarrierNode::bottom_type() const {
- const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
- Node* in_oop = in(Oop);
- floadbarrier[Control] = Type::CONTROL;
- floadbarrier[Memory] = Type::MEMORY;
- floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
- return TypeTuple::make(Number_of_Outputs, floadbarrier);
-}
-
-const TypePtr* LoadBarrierNode::adr_type() const {
- ShouldNotReachHere();
- return NULL;
-}
-
-const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
- const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
- const Type* val_t = phase->type(in(Oop));
- floadbarrier[Control] = Type::CONTROL;
- floadbarrier[Memory] = Type::MEMORY;
- floadbarrier[Oop] = val_t;
- return TypeTuple::make(Number_of_Outputs, floadbarrier);
-}
-
-bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
- if (phase != NULL) {
- return phase->is_dominator(d, n);
- }
-
- for (int i = 0; i < 10 && n != NULL; i++) {
- n = IfNode::up_one_dom(n, linear_only);
- if (n == d) {
- return true;
- }
- }
-
- return false;
-}
-
-LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
- if (is_weak()) {
- // Weak barriers can't be eliminated
- return NULL;
- }
-
- Node* val = in(LoadBarrierNode::Oop);
- if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
- LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
- assert(lb->in(Address) == in(Address), "");
- // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier.
- if (lb->in(Oop) == in(Oop)) {
- return lb;
- }
- // Follow chain of load barrier through Similar edges
- while (!lb->in(Similar)->is_top()) {
- lb = lb->in(Similar)->in(0)->as_LoadBarrier();
- assert(lb->in(Address) == in(Address), "");
- }
- if (lb != in(Similar)->in(0)) {
- return lb;
- }
- }
- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
- Node* u = val->fast_out(i);
- if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
- Node* this_ctrl = in(LoadBarrierNode::Control);
- Node* other_ctrl = u->in(LoadBarrierNode::Control);
- if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
- return u->as_LoadBarrier();
- }
- }
- }
-
- if (can_be_eliminated()) {
- return NULL;
- }
-
- if (!look_for_similar) {
- return NULL;
+ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+ ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref, tmp, weak);
+ if (!Compile::current()->in_scratch_emit_size()) {
+ barrier_set_state()->stubs()->append(stub);
}
- Node* addr = in(LoadBarrierNode::Address);
- for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
- Node* u = addr->fast_out(i);
- if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
- Node* this_ctrl = in(LoadBarrierNode::Control);
- Node* other_ctrl = u->in(LoadBarrierNode::Control);
- if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
- ResourceMark rm;
- Unique_Node_List wq;
- wq.push(in(LoadBarrierNode::Control));
- bool ok = true;
- bool dom_found = false;
- for (uint next = 0; next < wq.size(); ++next) {
- Node *n = wq.at(next);
- if (n->is_top()) {
- return NULL;
- }
- assert(n->is_CFG(), "");
- if (n->is_SafePoint()) {
- ok = false;
- break;
- }
- if (n == u) {
- dom_found = true;
- continue;
- }
- if (n->is_Region()) {
- for (uint i = 1; i < n->req(); i++) {
- Node* m = n->in(i);
- if (m != NULL) {
- wq.push(m);
- }
- }
- } else {
- Node* m = n->in(0);
- if (m != NULL) {
- wq.push(m);
- }
- }
- }
- if (ok) {
- assert(dom_found, "");
- return u->as_LoadBarrier();
- }
- break;
- }
- }
- }
+ return stub;
+}
+
+ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) :
+ _node(node),
+ _ref_addr(ref_addr),
+ _ref(ref),
+ _tmp(tmp),
+ _weak(weak),
+ _entry(),
+ _continuation() {
+ assert_different_registers(ref, ref_addr.base());
+ assert_different_registers(ref, ref_addr.index());
+}
- return NULL;
+Address ZLoadBarrierStubC2::ref_addr() const {
+ return _ref_addr;
+}
+
+Register ZLoadBarrierStubC2::ref() const {
+ return _ref;
+}
+
+Register ZLoadBarrierStubC2::tmp() const {
+ return _tmp;
+}
+
+address ZLoadBarrierStubC2::slow_path() const {
+ const DecoratorSet decorators = _weak ? ON_WEAK_OOP_REF : ON_STRONG_OOP_REF;
+ return ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators);
}
-void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
- // Change to that barrier may affect a dominated barrier so re-push those
- assert(!is_weak(), "sanity");
- Node* val = in(LoadBarrierNode::Oop);
+RegMask& ZLoadBarrierStubC2::live() const {
+ return *barrier_set_state()->live(_node);
+}
+
+Label* ZLoadBarrierStubC2::entry() {
+ // The _entry will never be bound when in_scratch_emit_size() is true.
+ // However, we still need to return a label that is not bound now, but
+ // will eventually be bound. Any label will do, as it will only act as
+ // a placeholder, so we return the _continuation label.
+ return Compile::current()->in_scratch_emit_size() ? &_continuation : &_entry;
+}
+
+Label* ZLoadBarrierStubC2::continuation() {
+ return &_continuation;
+}
- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
- Node* u = val->fast_out(i);
- if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
- Node* this_ctrl = in(Control);
- Node* other_ctrl = u->in(Control);
- if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
- igvn->_worklist.push(u);
- }
+void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+ return new (comp_arena) ZBarrierSetC2State(comp_arena);
+}
+
+void ZBarrierSetC2::late_barrier_analysis() const {
+ analyze_dominating_barriers();
+ compute_liveness_at_stubs();
+}
+
+void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
+ MacroAssembler masm(&cb);
+ GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
+
+ for (int i = 0; i < stubs->length(); i++) {
+ // Make sure there is enough space in the code buffer
+ if (cb.insts()->maybe_expand_to_ensure_remaining(Compile::MAX_inst_size) && cb.blob() == NULL) {
+ ciEnv::current()->record_failure("CodeCache is full");
+ return;
}
- Node* addr = in(LoadBarrierNode::Address);
- for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
- Node* u = addr->fast_out(i);
- if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
- Node* this_ctrl = in(Control);
- Node* other_ctrl = u->in(Control);
- if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
- igvn->_worklist.push(u);
- }
- }
- }
- }
-}
-
-Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
- LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
- if (dominating_barrier != NULL) {
- assert(!is_weak(), "Weak barriers cant be eliminated");
- assert(dominating_barrier->in(Oop) == in(Oop), "");
- return dominating_barrier;
- }
-
- return this;
-}
-
-Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- if (remove_dead_region(phase, can_reshape)) {
- return this;
+ ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
}
- Node *val = in(Oop);
- Node *mem = in(Memory);
- Node *ctrl = in(Control);
-
- assert(val->Opcode() != Op_LoadN, "");
- assert(val->Opcode() != Op_DecodeN, "");
-
- if (mem->is_MergeMem()) {
- Node *new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
- set_req(Memory, new_mem);
- if (mem->outcnt() == 0 && can_reshape) {
- phase->is_IterGVN()->_worklist.push(mem);
- }
- return this;
- }
+ masm.flush();
+}
- LoadBarrierNode *dominating_barrier = NULL;
- if (!is_weak()) {
- dominating_barrier = has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress());
- if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
- assert(in(Address) == dominating_barrier->in(Address), "");
- set_req(Similar, dominating_barrier->proj_out(Oop));
- return this;
- }
- }
-
- bool eliminate = can_reshape && (dominating_barrier != NULL || !has_true_uses());
- if (eliminate) {
- if (can_reshape) {
- PhaseIterGVN* igvn = phase->is_IterGVN();
- Node* out_ctrl = proj_out_or_null(Control);
- Node* out_res = proj_out_or_null(Oop);
+int ZBarrierSetC2::estimate_stub_size() const {
+ Compile* const C = Compile::current();
+ BufferBlob* const blob = C->scratch_buffer_blob();
+ GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
+ int size = 0;
- if (out_ctrl != NULL) {
- igvn->replace_node(out_ctrl, ctrl);
- }
-
- // That transformation may cause the Similar edge on the load barrier to be invalid
- fix_similar_in_uses(igvn);
- if (out_res != NULL) {
- if (dominating_barrier != NULL) {
- assert(!is_weak(), "Sanity");
- igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
- } else {
- igvn->replace_node(out_res, val);
- }
- }
- }
- return new ConINode(TypeInt::ZERO);
+ for (int i = 0; i < stubs->length(); i++) {
+ CodeBuffer cb(blob->content_begin(), (address)C->scratch_locs_memory() - blob->content_begin());
+ MacroAssembler masm(&cb);
+ ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
+ size += cb.insts_size();
}
- // If the Similar edge is no longer a load barrier, clear it
- Node* similar = in(Similar);
- if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
- set_req(Similar, phase->C->top());
- return this;
- }
-
- if (can_reshape && !is_weak()) {
- // If this barrier is linked through the Similar edge by a
- // dominated barrier and both barriers have the same Oop field,
- // the dominated barrier can go away, so push it for reprocessing.
- // We also want to avoid a barrier to depend on another dominating
- // barrier through its Similar edge that itself depend on another
- // barrier through its Similar edge and rather have the first
- // depend on the third.
- PhaseIterGVN* igvn = phase->is_IterGVN();
- Node* out_res = proj_out(Oop);
- for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
- Node* u = out_res->fast_out(i);
- if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
- (u->in(Oop) == val || !u->in(Similar)->is_top())) {
- assert(!u->as_LoadBarrier()->is_weak(), "Sanity");
- igvn->_worklist.push(u);
- }
- }
- push_dominated_barriers(igvn);
- }
-
- return NULL;
-}
-
-uint LoadBarrierNode::match_edge(uint idx) const {
- ShouldNotReachHere();
- return 0;
-}
-
-void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
- Node* out_res = proj_out_or_null(Oop);
- if (out_res == NULL) {
- return;
- }
-
- for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
- Node* u = out_res->fast_out(i);
- if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
- igvn->replace_input_of(u, Similar, igvn->C->top());
- --i;
- --imax;
- }
- }
-}
-
-bool LoadBarrierNode::has_true_uses() const {
- Node* out_res = proj_out_or_null(Oop);
- if (out_res != NULL) {
- for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
- Node *u = out_res->fast_out(i);
- if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
- return true;
- }
- }
- }
- return false;
+ return size;
}
static bool barrier_needed(C2Access& access) {
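[Editor's note] The bulk of this file's rewrite replaces the explicit LoadBarrierNode IR plus macro expansion with barrier data carried on the Mach nodes and small out-of-line stubs (ZLoadBarrierStubC2) that are collected during code emission and generated after the main instruction stream. A rough standalone sketch of that collect-then-emit shape (hypothetical names and printable pseudo-assembly instead of a MacroAssembler):

#include <cstdio>
#include <string>
#include <vector>

struct Stub { std::string label; };
static std::vector<Stub> stubs;

// Fast path inline; the slow path is deferred as a stub descriptor.
static void emit_load_with_barrier(const char* what) {
  printf("load %s; test bad-mask; jne %s_stub\n", what, what);
  stubs.push_back({what});
}

// All stub bodies are emitted after the main code, like emit_stubs() above.
static void emit_stubs() {
  for (const Stub& s : stubs) {
    printf("%s_stub: call runtime_load_barrier; jmp back\n", s.label.c_str());
  }
}

int main() {
  emit_load_with_barrier("field_a");
  emit_load_with_barrier("field_b");
  emit_stubs();
  return 0;
}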
@@ -474,1223 +180,252 @@
}
Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
- Node* p = BarrierSetC2::load_at_resolved(access, val_type);
- if (!barrier_needed(access)) {
- return p;
+ Node* result = BarrierSetC2::load_at_resolved(access, val_type);
+ if (barrier_needed(access) && access.raw_access()->is_Mem()) {
+ if ((access.decorators() & ON_WEAK_OOP_REF) != 0) {
+ access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierWeak);
+ } else {
+ access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierStrong);
+ }
}
- bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
- if (p->isa_Load()) {
- load_set_barrier(p->as_Load(), weak);
- }
- return p;
+ return result;
}
Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* val_type) const {
Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
- LoadStoreNode* lsn = result->as_LoadStore();
if (barrier_needed(access)) {
- lsn->set_has_barrier();
+ access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
}
- return lsn;
+ return result;
}
Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
- LoadStoreNode* lsn = result->as_LoadStore();
if (barrier_needed(access)) {
- lsn->set_has_barrier();
+ access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
}
- return lsn;
+ return result;
}
Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
- LoadStoreNode* lsn = result->as_LoadStore();
if (barrier_needed(access)) {
- lsn->set_has_barrier();
+ access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
}
- return lsn;
+ return result;
}
-// == Macro Expansion ==
-
-// Optimized, low spill, loadbarrier variant using stub specialized on register used
-void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
- PhaseIterGVN &igvn = phase->igvn();
- float unlikely = PROB_UNLIKELY(0.999);
-
- Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
- Node* in_mem = barrier->in(LoadBarrierNode::Memory);
- Node* in_val = barrier->in(LoadBarrierNode::Oop);
- Node* in_adr = barrier->in(LoadBarrierNode::Address);
-
- Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
- Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
-
- assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
-
- Node* jthread = igvn.transform(new ThreadLocalNode());
- Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
- Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
- TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
- MemNode::unordered));
- Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
- Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
- Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
- Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
- IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
- Node* then = igvn.transform(new IfTrueNode(iff));
- Node* elsen = igvn.transform(new IfFalseNode(iff));
-
- Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
- (const TypePtr*) in_val->bottom_type(), barrier->is_weak()));
-
- // Create the final region/phi pair to converge cntl/data paths to downstream code
- Node* result_region = igvn.transform(new RegionNode(3));
- result_region->set_req(1, then);
- result_region->set_req(2, elsen);
-
- Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
- result_phi->set_req(1, new_loadp);
- result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
-
- igvn.replace_node(out_ctrl, result_region);
- igvn.replace_node(out_res, result_phi);
-
- assert(barrier->outcnt() == 0,"LoadBarrier macro node has non-null outputs after expansion!");
-
- igvn.remove_dead_node(barrier);
- igvn.remove_dead_node(out_ctrl);
- igvn.remove_dead_node(out_res);
-
- assert(is_gc_barrier_node(result_phi), "sanity");
- assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
-
- phase->C->print_method(PHASE_BARRIER_EXPANSION, 4, barrier->_idx);
+bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
+ bool is_clone, ArrayCopyPhase phase) const {
+ return type == T_OBJECT || type == T_ARRAY;
}
-bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
- ZBarrierSetC2State* s = state();
- if (s->load_barrier_count() > 0) {
- PhaseMacroExpand macro(igvn);
+// == Dominating barrier elision ==
- int skipped = 0;
- while (s->load_barrier_count() > skipped) {
- int load_barrier_count = s->load_barrier_count();
- LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
- if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
- // Node is unreachable, so don't try to expand it
- s->remove_load_barrier_node(n);
- continue;
- }
- if (!n->can_be_eliminated()) {
- skipped++;
- continue;
- }
- expand_loadbarrier_node(&macro, n);
- assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
- if (C->failing()) {
- return true;
- }
- }
- while (s->load_barrier_count() > 0) {
- int load_barrier_count = s->load_barrier_count();
- LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
- assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
- assert(!n->can_be_eliminated(), "should have been processed already");
- expand_loadbarrier_node(&macro, n);
- assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
- if (C->failing()) {
- return true;
- }
- }
- igvn.set_delay_transform(false);
- igvn.optimize();
- if (C->failing()) {
+static bool block_has_safepoint(const Block* block, uint from, uint to) {
+ for (uint i = from; i < to; i++) {
+ if (block->get_node(i)->is_MachSafePoint()) {
+ // Safepoint found
return true;
}
}
+ // Safepoint not found
return false;
}
-Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
- Node* node = c;
+static bool block_has_safepoint(const Block* block) {
+ return block_has_safepoint(block, 0, block->number_of_nodes());
+}
- // 1. This step follows potential oop projections of a load barrier before expansion
- if (node->is_Proj()) {
- node = node->in(0);
+static uint block_index(const Block* block, const Node* node) {
+ for (uint j = 0; j < block->number_of_nodes(); ++j) {
+ if (block->get_node(j) == node) {
+ return j;
+ }
}
+ ShouldNotReachHere();
+ return 0;
+}
+
+void ZBarrierSetC2::analyze_dominating_barriers() const {
+ ResourceMark rm;
+ Compile* const C = Compile::current();
+ PhaseCFG* const cfg = C->cfg();
+ Block_List worklist;
+ Node_List mem_ops;
+ Node_List barrier_loads;
- // 2. This step checks for unexpanded load barriers
- if (node->is_LoadBarrier()) {
- return node->in(LoadBarrierNode::Oop);
- }
+ // Step 1 - Find accesses, and track them in lists
+ for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
+ const Block* const block = cfg->get_block(i);
+ for (uint j = 0; j < block->number_of_nodes(); ++j) {
+ const Node* const node = block->get_node(j);
+ if (!node->is_Mach()) {
+ continue;
+ }
- // 3. This step checks for the phi corresponding to an optimized load barrier expansion
- if (node->is_Phi()) {
- PhiNode* phi = node->as_Phi();
- Node* n = phi->in(1);
- if (n != NULL && n->is_LoadBarrierSlowReg()) {
- assert(c == node, "projections from step 1 should only be seen before macro expansion");
- return phi->in(2);
+ MachNode* const mach = node->as_Mach();
+ switch (mach->ideal_Opcode()) {
+ case Op_LoadP:
+ case Op_CompareAndExchangeP:
+ case Op_CompareAndSwapP:
+ case Op_GetAndSetP:
+ if (mach->barrier_data() == ZLoadBarrierStrong) {
+ barrier_loads.push(mach);
+ }
+ case Op_StoreP:
+ mem_ops.push(mach);
+ break;
+
+ default:
+ break;
+ }
}
}
- return c;
-}
-
-Node* ZBarrierSetC2::step_over_gc_barrier_ctrl(Node* c) const {
- Node* node = c;
-
- // 1. This step follows potential ctrl projections of a load barrier before expansion
- if (node->is_Proj()) {
- node = node->in(0);
- }
-
- // 2. This step checks for unexpanded load barriers
- if (node->is_LoadBarrier()) {
- return node->in(LoadBarrierNode::Control);
- }
-
- return c;
-}
-
-bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
- return is_reference_type(type);
-}
-
-bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
- switch (opcode) {
- case Op_LoadBarrier:
- assert(0, "There should be no load barriers left");
- case Op_ZGetAndSetP:
- case Op_ZCompareAndExchangeP:
- case Op_ZCompareAndSwapP:
- case Op_ZWeakCompareAndSwapP:
-#ifdef ASSERT
- if (VerifyOptoOopOffsets) {
- MemNode *mem = n->as_Mem();
- // Check to see if address types have grounded out somehow.
- const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
- ciInstanceKlass *k = tp->klass()->as_instance_klass();
- bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
- assert(!tp || oop_offset_is_sane, "");
- }
-#endif
- return true;
- default:
- return false;
- }
-}
+ // Step 2 - Find dominating accesses for each load
+ for (uint i = 0; i < barrier_loads.size(); i++) {
+ MachNode* const load = barrier_loads.at(i)->as_Mach();
+ const TypePtr* load_adr_type = NULL;
+ intptr_t load_offset = 0;
+ const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
+ Block* const load_block = cfg->get_block_for_node(load);
+ const uint load_index = block_index(load_block, load);
-bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
- switch(opcode) {
- case Op_CallLeaf:
- if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
- n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
- mem_op = true;
- mem_addr_idx = TypeFunc::Parms + 1;
- return true;
- }
- return false;
- default:
- return false;
- }
-}
-
-bool ZBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
- switch(opcode) {
- case Op_ZCompareAndExchangeP:
- case Op_ZCompareAndSwapP:
- case Op_ZWeakCompareAndSwapP: {
- Node *mem = n->in(MemNode::Address);
- Node *keepalive = n->in(5);
- Node *pair1 = new BinaryNode(mem, keepalive);
-
- Node *newval = n->in(MemNode::ValueIn);
- Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
- Node *pair2 = new BinaryNode(oldval, newval);
+ for (uint j = 0; j < mem_ops.size(); j++) {
+ MachNode* mem = mem_ops.at(j)->as_Mach();
+ const TypePtr* mem_adr_type = NULL;
+ intptr_t mem_offset = 0;
+ const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
+ Block* mem_block = cfg->get_block_for_node(mem);
+ uint mem_index = block_index(mem_block, mem);
- n->set_req(MemNode::Address, pair1);
- n->set_req(MemNode::ValueIn, pair2);
- n->del_req(5);
- n->del_req(LoadStoreConditionalNode::ExpectedIn);
- return true;
- }
- case Op_ZGetAndSetP: {
- Node *keepalive = n->in(4);
- Node *newval = n->in(MemNode::ValueIn);
- Node *pair = new BinaryNode(newval, keepalive);
- n->set_req(MemNode::ValueIn, pair);
- n->del_req(4);
- return true;
- }
+ if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
+ load_obj == NULL || mem_obj == NULL ||
+ load_offset < 0 || mem_offset < 0) {
+ continue;
+ }
- default:
- return false;
- }
-}
-
-// == Verification ==
-
-#ifdef ASSERT
-
-static void verify_slippery_safepoints_internal(Node* ctrl) {
- // Given a CFG node, make sure it does not contain both safepoints and loads
- // that have expanded barriers.
- bool found_safepoint = false;
- bool found_load = false;
+ if (mem_obj != load_obj || mem_offset != load_offset) {
+ // Not the same addresses, not a candidate
+ continue;
+ }
- for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
- Node* node = ctrl->fast_out(i);
- if (node->in(0) != ctrl) {
- // Skip outgoing precedence edges from ctrl.
- continue;
- }
- if (node->is_SafePoint()) {
- found_safepoint = true;
- }
- if (node->is_Load() && load_require_barrier(node->as_Load()) &&
- load_has_expanded_barrier(node->as_Load())) {
- found_load = true;
- }
- }
- assert(!found_safepoint || !found_load, "found load and safepoint in same block");
-}
-
-static void verify_slippery_safepoints(Compile* C) {
- ResourceArea *area = Thread::current()->resource_area();
- Unique_Node_List visited(area);
- Unique_Node_List checked(area);
-
- // Recursively walk the graph.
- visited.push(C->root());
- while (visited.size() > 0) {
- Node* node = visited.pop();
-
- Node* ctrl = node;
- if (!node->is_CFG()) {
- ctrl = node->in(0);
- }
-
- if (ctrl != NULL && !checked.member(ctrl)) {
- // For each block found in the graph, verify that it does not
- // contain both a safepoint and a load requiring barriers.
- verify_slippery_safepoints_internal(ctrl);
-
- checked.push(ctrl);
- }
-
- checked.push(node);
-
- for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
- Node* use = node->fast_out(i);
- if (checked.member(use)) continue;
- if (visited.member(use)) continue;
- visited.push(use);
- }
- }
-}
+ if (load_block == mem_block) {
+ // Earlier accesses in the same block
+ if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) {
+ load->set_barrier_data(ZLoadBarrierElided);
+ }
+ } else if (mem_block->dominates(load_block)) {
+ // Dominating block? Look around for safepoints
+ ResourceMark rm;
+ Block_List stack;
+ VectorSet visited(Thread::current()->resource_area());
+ stack.push(load_block);
+ bool safepoint_found = block_has_safepoint(load_block);
+ while (!safepoint_found && stack.size() > 0) {
+ Block* block = stack.pop();
+ if (visited.test_set(block->_pre_order)) {
+ continue;
+ }
+ if (block_has_safepoint(block)) {
+ safepoint_found = true;
+ break;
+ }
+ if (block == mem_block) {
+ continue;
+ }
-void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
- switch(phase) {
- case BarrierSetC2::BeforeOptimize:
- case BarrierSetC2::BeforeLateInsertion:
- assert(state()->load_barrier_count() == 0, "No barriers inserted yet");
- break;
- case BarrierSetC2::BeforeMacroExpand:
- // Barrier placement should be set by now.
- verify_gc_barriers(false /*post_parse*/);
- break;
- case BarrierSetC2::BeforeCodeGen:
- // Barriers has been fully expanded.
- assert(state()->load_barrier_count() == 0, "No more macro barriers");
- verify_slippery_safepoints(compile);
- break;
- default:
- assert(0, "Phase without verification");
- }
-}
-
-// post_parse implies that there might be load barriers without uses after parsing
-// That only applies when adding barriers at parse time.
-void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
- ZBarrierSetC2State* s = state();
- Compile* C = Compile::current();
- ResourceMark rm;
- VectorSet visited(Thread::current()->resource_area());
-
- for (int i = 0; i < s->load_barrier_count(); i++) {
- LoadBarrierNode* n = s->load_barrier_node(i);
-
- // The dominating barrier on the same address if it exists and
- // this barrier must not be applied on the value from the same
- // load otherwise the value is not reloaded before it's used the
- // second time.
- assert(n->in(LoadBarrierNode::Similar)->is_top() ||
- (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
- n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
- n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
- "broken similar edge");
-
- assert(n->as_LoadBarrier()->has_true_uses(),
- "found unneeded load barrier");
-
- // Several load barrier nodes chained through their Similar edge
- // break the code that remove the barriers in final graph reshape.
- assert(n->in(LoadBarrierNode::Similar)->is_top() ||
- (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
- n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
- "chain of Similar load barriers");
-
- if (!n->in(LoadBarrierNode::Similar)->is_top()) {
- ResourceMark rm;
- Unique_Node_List wq;
- Node* other = n->in(LoadBarrierNode::Similar)->in(0);
- wq.push(n);
- for (uint next = 0; next < wq.size(); ++next) {
- Node *nn = wq.at(next);
- assert(nn->is_CFG(), "");
- assert(!nn->is_SafePoint(), "");
-
- if (nn == other) {
- continue;
+ // Push predecessor blocks
+ for (uint p = 1; p < block->num_preds(); ++p) {
+ Block* pred = cfg->get_block_for_node(block->pred(p));
+ stack.push(pred);
+ }
}
- if (nn->is_Region()) {
- for (uint i = 1; i < nn->req(); i++) {
- Node* m = nn->in(i);
- if (m != NULL) {
- wq.push(m);
- }
- }
- } else {
- Node* m = nn->in(0);
- if (m != NULL) {
- wq.push(m);
- }
+ if (!safepoint_found) {
+ load->set_barrier_data(ZLoadBarrierElided);
}
}
}
}
}
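
The elision logic above reduces to: a strong load barrier may be dropped when an earlier access to the same base and offset reaches the load with no safepoint in between (same-block case first, dominating-block case after). A minimal standalone sketch of the same-block criterion, using toy types rather than HotSpot's Block/MachNode:

// Toy stand-ins, not HotSpot code.
#include <cstdint>
#include <vector>

struct ToyAccess {
  int      block;    // block id the access is scheduled in
  int      index;    // position within the block
  intptr_t base;     // stand-in for the base node identity
  intptr_t offset;   // displacement from the base
};

// Stand-in for "is there a MachSafePoint in block b between from and to".
bool toy_block_has_safepoint(const std::vector<std::vector<bool> >& safepoint_at,
                             int b, int from, int to) {
  for (int i = from; i < to; i++) {
    if (safepoint_at[b][i]) {
      return true;
    }
  }
  return false;
}

// Same-block elision: mem must come first, the address (base+offset) must
// match, and no safepoint may separate the two accesses.
bool can_elide_same_block(const ToyAccess& mem, const ToyAccess& load,
                          const std::vector<std::vector<bool> >& safepoint_at) {
  return mem.block == load.block &&
         mem.base == load.base && mem.offset == load.offset &&
         mem.index < load.index &&
         !toy_block_has_safepoint(safepoint_at, load.block, mem.index + 1, load.index);
}

int main() {
  std::vector<std::vector<bool> > safepoint_at(1, std::vector<bool>(4, false));
  safepoint_at[0][2] = true;
  ToyAccess store = {0, 0, 0x1000, 16};
  ToyAccess load1 = {0, 1, 0x1000, 16};  // no safepoint in between -> elide
  ToyAccess load2 = {0, 3, 0x1000, 16};  // safepoint at index 2    -> keep
  return can_elide_same_block(store, load1, safepoint_at) &&
         !can_elide_same_block(store, load2, safepoint_at) ? 0 : 1;
}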
-#endif // end verification code
-
-// If a call is the control, we actually want its control projection
-static Node* normalize_ctrl(Node* node) {
- if (node->is_Call()) {
- node = node->as_Call()->proj_out(TypeFunc::Control);
- }
- return node;
-}
-
-static Node* get_ctrl_normalized(PhaseIdealLoop *phase, Node* node) {
- return normalize_ctrl(phase->get_ctrl(node));
-}
-
-static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl);
-
-// This code is cloning all uses of a load that is between a call and the catch blocks,
-// to each use.
-
-static bool fixup_uses_in_catch(PhaseIdealLoop *phase, Node *start_ctrl, Node *node) {
-
- if (!phase->has_ctrl(node)) {
- // This node is floating - doesn't need to be cloned.
- assert(node != start_ctrl, "check");
- return false;
- }
-
- Node* ctrl = get_ctrl_normalized(phase, node);
- if (ctrl != start_ctrl) {
- // We are in a successor block - the node is ok.
- return false; // Unwind
- }
-
- // Process successor nodes
- int outcnt = node->outcnt();
- for (int i = 0; i < outcnt; i++) {
- Node* n = node->raw_out(0);
- assert(!n->is_LoadBarrier(), "Sanity");
- // Calling recursively, visiting leafs first
- fixup_uses_in_catch(phase, start_ctrl, n);
- }
-
- // Now all successors are outside
- // - Clone this node to both successors
- assert(!node->is_Store(), "Stores not expected here");
-
- // In some very rare cases a load that doesn't need a barrier will end up here
- // Treat it as a LoadP and the insertion of phis will be done correctly.
- if (node->is_Load()) {
- call_catch_cleanup_one(phase, node->as_Load(), phase->get_ctrl(node));
- } else {
- for (DUIterator_Fast jmax, i = node->fast_outs(jmax); i < jmax; i++) {
- Node* use = node->fast_out(i);
- Node* clone = node->clone();
- assert(clone->outcnt() == 0, "");
+// == Reduced spilling optimization ==
- assert(use->find_edge(node) != -1, "check");
- phase->igvn().rehash_node_delayed(use);
- use->replace_edge(node, clone);
-
- Node* new_ctrl;
- if (use->is_block_start()) {
- new_ctrl = use;
- } else if (use->is_CFG()) {
- new_ctrl = use->in(0);
- assert (new_ctrl != NULL, "");
- } else {
- new_ctrl = get_ctrl_normalized(phase, use);
- }
-
- phase->set_ctrl(clone, new_ctrl);
-
- if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone op %i as %i to control %i", node->_idx, clone->_idx, new_ctrl->_idx);
- phase->igvn().register_new_node_with_optimizer(clone);
- --i, --jmax;
- }
- assert(node->outcnt() == 0, "must be empty now");
-
- // Node node is dead.
- phase->igvn().remove_dead_node(node);
- }
- return true; // unwind - return if a use was processed
-}
+void ZBarrierSetC2::compute_liveness_at_stubs() const {
+ ResourceMark rm;
+ Compile* const C = Compile::current();
+ Arena* const A = Thread::current()->resource_area();
+ PhaseCFG* const cfg = C->cfg();
+ PhaseRegAlloc* const regalloc = C->regalloc();
+ RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
+ ZBarrierSetAssembler* const bs = ZBarrierSet::assembler();
+ Block_List worklist;
-// Clone a load to a specific catch_proj
-static Node* clone_load_to_catchproj(PhaseIdealLoop* phase, Node* load, Node* catch_proj) {
- Node* cloned_load = load->clone();
- cloned_load->set_req(0, catch_proj); // set explicit control
- phase->set_ctrl(cloned_load, catch_proj); // update
- if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone LOAD %i as %i to control %i", load->_idx, cloned_load->_idx, catch_proj->_idx);
- phase->igvn().register_new_node_with_optimizer(cloned_load);
- return cloned_load;
-}
-
-static Node* get_dominating_region(PhaseIdealLoop* phase, Node* node, Node* stop) {
- Node* region = node;
- while (!region->isa_Region()) {
- Node *up = phase->idom(region);
- assert(up != region, "Must not loop");
- assert(up != stop, "Must not find original control");
- region = up;
- }
- return region;
-}
-
-// Clone this load to each catch block
-static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) {
- bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
- phase->igvn().set_delay_transform(true);
-
- // Verify pre conditions
- assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj");
- assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch");
-
- if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) {
- if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx);
- return;
+ for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
+ new ((void*)(live + i)) RegMask();
+ worklist.push(cfg->get_block(i));
}
- // Process the loads successor nodes - if any is between
- // the call and the catch blocks, they need to be cloned to.
- // This is done recursively
- for (uint i = 0; i < load->outcnt();) {
- Node *n = load->raw_out(i);
- assert(!n->is_LoadBarrier(), "Sanity");
- if (!fixup_uses_in_catch(phase, ctrl, n)) {
- // if no successor was cloned, progress to next out.
- i++;
- }
- }
-
- // Now all the loads uses has been cloned down
- // Only thing left is to clone the loads, but they must end up
- // first in the catch blocks.
-
- // We clone the loads oo the catch blocks only when needed.
- // An array is used to map the catch blocks to each lazily cloned load.
- // In that way no extra unnecessary loads are cloned.
-
- // Any use dominated by original block must have an phi and a region added
-
- Node* catch_node = ctrl->raw_out(0);
- int number_of_catch_projs = catch_node->outcnt();
- Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs);
- Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs);
-
- // The phi_map is used to keep track of where phis have already been inserted
- int phi_map_len = phase->C->unique();
- Node** phi_map = NEW_RESOURCE_ARRAY(Node*, phi_map_len);
- Copy::zero_to_bytes(phi_map, sizeof(Node*) * phi_map_len);
+ while (worklist.size() > 0) {
+ const Block* const block = worklist.pop();
+ RegMask& old_live = live[block->_pre_order];
+ RegMask new_live;
- for (unsigned int i = 0; i < load->outcnt(); i++) {
- Node* load_use_control = NULL;
- Node* load_use = load->raw_out(i);
-
- if (phase->has_ctrl(load_use)) {
- load_use_control = get_ctrl_normalized(phase, load_use);
- assert(load_use_control != ctrl, "sanity");
- } else {
- load_use_control = load_use->in(0);
- }
- assert(load_use_control != NULL, "sanity");
- if (trace) tty->print_cr(" Handling use: %i, with control: %i", load_use->_idx, load_use_control->_idx);
-
- // Some times the loads use is a phi. For them we need to determine from which catch block
- // the use is defined.
- bool load_use_is_phi = false;
- unsigned int load_use_phi_index = 0;
- Node* phi_ctrl = NULL;
- if (load_use->is_Phi()) {
- // Find phi input that matches load
- for (unsigned int u = 1; u < load_use->req(); u++) {
- if (load_use->in(u) == load) {
- load_use_is_phi = true;
- load_use_phi_index = u;
- assert(load_use->in(0)->is_Region(), "Region or broken");
- phi_ctrl = load_use->in(0)->in(u);
- assert(phi_ctrl->is_CFG(), "check");
- assert(phi_ctrl != load, "check");
- break;
- }
- }
- assert(load_use_is_phi, "must find");
- assert(load_use_phi_index > 0, "sanity");
+ // Initialize to union of successors
+ for (uint i = 0; i < block->_num_succs; i++) {
+ const uint succ_id = block->_succs[i]->_pre_order;
+ new_live.OR(live[succ_id]);
}
- // For each load use, see which catch projs dominates, create load clone lazily and reconnect
- bool found_dominating_catchproj = false;
- for (int c = 0; c < number_of_catch_projs; c++) {
- Node* catchproj = catch_node->raw_out(c);
- assert(catchproj != NULL && catchproj->isa_CatchProj(), "Sanity");
-
- if (!phase->is_dominator(catchproj, load_use_control)) {
- if (load_use_is_phi && phase->is_dominator(catchproj, phi_ctrl)) {
- // The loads use is local to the catchproj.
- // fall out and replace load with catch-local load clone.
- } else {
- continue;
- }
- }
- assert(!found_dominating_catchproj, "Max one should match");
-
- // Clone loads to catch projs
- Node* load_clone = proj_to_load_mapping[c];
- if (load_clone == NULL) {
- load_clone = clone_load_to_catchproj(phase, load, catchproj);
- proj_to_load_mapping[c] = load_clone;
- }
- phase->igvn().rehash_node_delayed(load_use);
+ // Walk block backwards, computing liveness
+ for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
+ const Node* const node = block->get_node(i);
- if (load_use_is_phi) {
- // phis are special - the load is defined from a specific control flow
- load_use->set_req(load_use_phi_index, load_clone);
- } else {
- // Multipe edges can be replaced at once - on calls for example
- load_use->replace_edge(load, load_clone);
+ // Remove def bits
+ const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
+ const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
+ if (first != OptoReg::Bad) {
+ new_live.Remove(first);
}
- --i; // more than one edge can have been removed, but the next is in later iterations
-
- // We could break the for-loop after finding a dominating match.
- // But keep iterating to catch any bad idom early.
- found_dominating_catchproj = true;
- }
+ if (second != OptoReg::Bad) {
+ new_live.Remove(second);
+ }
- // We found no single catchproj that dominated the use - The use is at a point after
- // where control flow from multiple catch projs have merged. We will have to create
- // phi nodes before the use and tie the output from the cloned loads together. It
- // can be a single phi or a number of chained phis, depending on control flow
- if (!found_dominating_catchproj) {
-
- // Use phi-control if use is a phi
- if (load_use_is_phi) {
- load_use_control = phi_ctrl;
- }
- assert(phase->is_dominator(ctrl, load_use_control), "Common use but no dominator");
-
- // Clone a load on all paths
- for (int c = 0; c < number_of_catch_projs; c++) {
- Node* catchproj = catch_node->raw_out(c);
- Node* load_clone = proj_to_load_mapping[c];
- if (load_clone == NULL) {
- load_clone = clone_load_to_catchproj(phase, load, catchproj);
- proj_to_load_mapping[c] = load_clone;
+ // Add use bits
+ for (uint j = 1; j < node->req(); ++j) {
+ const Node* const use = node->in(j);
+ const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
+ const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
+ if (first != OptoReg::Bad) {
+ new_live.Insert(first);
+ }
+ if (second != OptoReg::Bad) {
+ new_live.Insert(second);
}
}
- // Move up dominator tree from use until dom front is reached
- Node* next_region = get_dominating_region(phase, load_use_control, ctrl);
- while (phase->idom(next_region) != catch_node) {
- next_region = phase->idom(next_region);
- if (trace) tty->print_cr("Moving up idom to region ctrl %i", next_region->_idx);
- }
- assert(phase->is_dominator(catch_node, next_region), "Sanity");
-
- // Create or reuse phi node that collect all cloned loads and feed it to the use.
- Node* test_phi = phi_map[next_region->_idx];
- if ((test_phi != NULL) && test_phi->is_Phi()) {
- // Reuse an already created phi
- if (trace) tty->print_cr(" Using cached Phi %i on load_use %i", test_phi->_idx, load_use->_idx);
- phase->igvn().rehash_node_delayed(load_use);
- load_use->replace_edge(load, test_phi);
- // Now this use is done
- } else {
- // Otherwise we need to create one or more phis
- PhiNode* next_phi = new PhiNode(next_region, load->type());
- phi_map[next_region->_idx] = next_phi; // cache new phi
- phase->igvn().rehash_node_delayed(load_use);
- load_use->replace_edge(load, next_phi);
-
- int dominators_of_region = 0;
- do {
- // New phi, connect to region and add all loads as in.
- Node* region = next_region;
- assert(region->isa_Region() && region->req() > 2, "Catch dead region nodes");
- PhiNode* new_phi = next_phi;
-
- if (trace) tty->print_cr("Created Phi %i on load %i with control %i", new_phi->_idx, load->_idx, region->_idx);
-
- // Need to add all cloned loads to the phi, taking care that the right path is matched
- dominators_of_region = 0; // reset for new region
- for (unsigned int reg_i = 1; reg_i < region->req(); reg_i++) {
- Node* region_pred = region->in(reg_i);
- assert(region_pred->is_CFG(), "check");
- bool pred_has_dominator = false;
- for (int c = 0; c < number_of_catch_projs; c++) {
- Node* catchproj = catch_node->raw_out(c);
- if (phase->is_dominator(catchproj, region_pred)) {
- new_phi->set_req(reg_i, proj_to_load_mapping[c]);
- if (trace) tty->print_cr(" - Phi in(%i) set to load %i", reg_i, proj_to_load_mapping[c]->_idx);
- pred_has_dominator = true;
- dominators_of_region++;
- break;
- }
- }
-
- // Sometimes we need to chain several phis.
- if (!pred_has_dominator) {
- assert(dominators_of_region <= 1, "More than one region can't require extra phi");
- if (trace) tty->print_cr(" - Region %i pred %i not dominated by catch proj", region->_idx, region_pred->_idx);
- // Continue search on on this region_pred
- // - walk up to next region
- // - create a new phi and connect to first new_phi
- next_region = get_dominating_region(phase, region_pred, ctrl);
-
- // Lookup if there already is a phi, create a new otherwise
- Node* test_phi = phi_map[next_region->_idx];
- if ((test_phi != NULL) && test_phi->is_Phi()) {
- next_phi = test_phi->isa_Phi();
- dominators_of_region++; // record that a match was found and that we are done
- if (trace) tty->print_cr(" Using cached phi Phi %i on control %i", next_phi->_idx, next_region->_idx);
- } else {
- next_phi = new PhiNode(next_region, load->type());
- phi_map[next_region->_idx] = next_phi;
- }
- new_phi->set_req(reg_i, next_phi);
- }
- }
-
- new_phi->set_req(0, region);
- phase->igvn().register_new_node_with_optimizer(new_phi);
- phase->set_ctrl(new_phi, region);
-
- assert(dominators_of_region != 0, "Must have found one this iteration");
- } while (dominators_of_region == 1);
- }
- --i;
- }
- } // end of loop over uses
-
- assert(load->outcnt() == 0, "All uses should be handled");
- phase->igvn().remove_dead_node(load);
- phase->C->print_method(PHASE_CALL_CATCH_CLEANUP, 4, load->_idx);
-
- // Now we should be home
- phase->igvn().set_delay_transform(false);
-}
-
-// Sort out the loads that are between a call ant its catch blocks
-static void process_catch_cleanup_candidate(PhaseIdealLoop* phase, LoadNode* load, bool verify) {
- bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
-
- Node* ctrl = get_ctrl_normalized(phase, load);
- if (!ctrl->is_Proj() || (ctrl->in(0) == NULL) || !ctrl->in(0)->isa_Call()) {
- return;
- }
-
- Node* catch_node = ctrl->isa_Proj()->raw_out(0);
- if (catch_node->is_Catch()) {
- if (catch_node->outcnt() > 1) {
- assert(!verify, "All loads should already have been moved");
- call_catch_cleanup_one(phase, load, ctrl);
- } else {
- if (trace) tty->print_cr("Call catch cleanup with only one catch: load %i ", load->_idx);
- }
- }
-}
-
-void ZBarrierSetC2::barrier_insertion_phase(Compile* C, PhaseIterGVN& igvn) const {
- PhaseIdealLoop::optimize(igvn, LoopOptsZBarrierInsertion);
- if (C->failing()) return;
-}
-
-bool ZBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
-
- if (mode == LoopOptsZBarrierInsertion) {
- // First make sure all loads between call and catch are moved to the catch block
- clean_catch_blocks(phase);
- DEBUG_ONLY(clean_catch_blocks(phase, true /* verify */);)
-
- // Then expand barriers on all loads
- insert_load_barriers(phase);
-
- // Handle all Unsafe that need barriers.
- insert_barriers_on_unsafe(phase);
-
- phase->C->clear_major_progress();
- return true;
- } else {
- return false;
- }
-}
-
-static bool can_simplify_cas(LoadStoreNode* node) {
- if (node->isa_LoadStoreConditional()) {
- Node *expected_in = node->as_LoadStoreConditional()->in(LoadStoreConditionalNode::ExpectedIn);
- return (expected_in->get_ptr_type() == TypePtr::NULL_PTR);
- } else {
- return false;
- }
-}
-
-static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* old_node) {
-
- Compile *C = phase->C;
- PhaseIterGVN &igvn = phase->igvn();
- LoadStoreNode* zclone = NULL;
-
- Node *in_ctrl = old_node->in(MemNode::Control);
- Node *in_mem = old_node->in(MemNode::Memory);
- Node *in_adr = old_node->in(MemNode::Address);
- Node *in_val = old_node->in(MemNode::ValueIn);
- const TypePtr *adr_type = old_node->adr_type();
- const TypePtr* load_type = TypeOopPtr::BOTTOM; // The type for the load we are adding
-
- switch (old_node->Opcode()) {
- case Op_CompareAndExchangeP: {
- zclone = new ZCompareAndExchangePNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
- adr_type, old_node->get_ptr_type(), ((CompareAndExchangeNode*)old_node)->order());
- load_type = old_node->bottom_type()->is_ptr();
- break;
- }
- case Op_WeakCompareAndSwapP: {
- if (can_simplify_cas(old_node)) {
- break;
- }
- zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
- ((CompareAndSwapNode*)old_node)->order());
- adr_type = TypePtr::BOTTOM;
- break;
- }
- case Op_CompareAndSwapP: {
- if (can_simplify_cas(old_node)) {
- break;
- }
- zclone = new ZCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
- ((CompareAndSwapNode*)old_node)->order());
- adr_type = TypePtr::BOTTOM;
- break;
- }
- case Op_GetAndSetP: {
- zclone = new ZGetAndSetPNode(in_ctrl, in_mem, in_adr, in_val, old_node->adr_type(), old_node->get_ptr_type());
- load_type = old_node->bottom_type()->is_ptr();
- break;
- }
- }
- if (zclone != NULL) {
- igvn.register_new_node_with_optimizer(zclone, old_node);
-
- // Make load
- LoadPNode *load = new LoadPNode(NULL, in_mem, in_adr, adr_type, load_type, MemNode::unordered,
- LoadNode::DependsOnlyOnTest);
- load_set_expanded_barrier(load);
- igvn.register_new_node_with_optimizer(load);
- igvn.replace_node(old_node, zclone);
-
- Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */);
- Node *barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
- Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);
-
- igvn.register_new_node_with_optimizer(barrier);
- igvn.register_new_node_with_optimizer(barrier_val);
- igvn.register_new_node_with_optimizer(barrier_ctrl);
-
- // loop over all of in_ctrl usages and move to barrier_ctrl
- for (DUIterator_Last imin, i = in_ctrl->last_outs(imin); i >= imin; --i) {
- Node *use = in_ctrl->last_out(i);
- uint l;
- for (l = 0; use->in(l) != in_ctrl; l++) {}
- igvn.replace_input_of(use, l, barrier_ctrl);
- }
-
- load->set_req(MemNode::Control, in_ctrl);
- barrier->set_req(LoadBarrierNode::Control, in_ctrl);
- zclone->add_req(barrier_val); // add req as keep alive.
-
- C->print_method(PHASE_ADD_UNSAFE_BARRIER, 4, zclone->_idx);
- }
-}
-
-void ZBarrierSetC2::insert_barriers_on_unsafe(PhaseIdealLoop* phase) const {
- Compile *C = phase->C;
- PhaseIterGVN &igvn = phase->igvn();
- uint new_ids = C->unique();
- VectorSet visited(Thread::current()->resource_area());
- GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
- nodeStack.push(C->root());
- visited.test_set(C->root()->_idx);
-
- // Traverse all nodes, visit all unsafe ops that require a barrier
- while (nodeStack.length() > 0) {
- Node *n = nodeStack.pop();
-
- bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
- if (is_old_node) {
- if (n->is_LoadStore()) {
- LoadStoreNode* lsn = n->as_LoadStore();
- if (lsn->has_barrier()) {
- BasicType bt = lsn->in(MemNode::Address)->bottom_type()->basic_type();
- assert (is_reference_type(bt), "Sanity test");
- insert_barrier_before_unsafe(phase, lsn);
- }
- }
- }
- for (uint i = 0; i < n->len(); i++) {
- if (n->in(i)) {
- if (!visited.test_set(n->in(i)->_idx)) {
- nodeStack.push(n->in(i));
- }
- }
- }
- }
-
- igvn.optimize();
- C->print_method(PHASE_ADD_UNSAFE_BARRIER, 2);
-}
-
-// The purpose of ZBarrierSetC2::clean_catch_blocks is to prepare the IR for
-// splicing in load barrier nodes.
-//
-// The problem is that we might have instructions between a call and its catch nodes.
-// (This is usually handled in PhaseCFG:call_catch_cleanup, which clones mach nodes in
-// already scheduled blocks.) We can't have loads that require barriers there,
-// because we need to splice in new control flow, and that would violate the IR.
-//
-// clean_catch_blocks find all Loads that require a barrier and clone them and any
-// dependent instructions to each use. The loads must be in the beginning of the catch block
-// before any store.
-//
-// Sometimes the loads use will be at a place dominated by all catch blocks, then we need
-// a load in each catch block, and a Phi at the dominated use.
-
-void ZBarrierSetC2::clean_catch_blocks(PhaseIdealLoop* phase, bool verify) const {
-
- Compile *C = phase->C;
- uint new_ids = C->unique();
- PhaseIterGVN &igvn = phase->igvn();
- VectorSet visited(Thread::current()->resource_area());
- GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
- nodeStack.push(C->root());
- visited.test_set(C->root()->_idx);
-
- // Traverse all nodes, visit all loads that require a barrier
- while(nodeStack.length() > 0) {
- Node *n = nodeStack.pop();
-
- for (uint i = 0; i < n->len(); i++) {
- if (n->in(i)) {
- if (!visited.test_set(n->in(i)->_idx)) {
- nodeStack.push(n->in(i));
- }
+ // If this node tracks liveness, update it
+ RegMask* const regs = barrier_set_state()->live(node);
+ if (regs != NULL) {
+ regs->OR(new_live);
}
}
- bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup
- if (n->is_Load() && is_old_node) {
- LoadNode* load = n->isa_Load();
- // only care about loads that will have a barrier
- if (load_require_barrier(load)) {
- process_catch_cleanup_candidate(phase, load, verify);
- }
- }
- }
-
- C->print_method(PHASE_CALL_CATCH_CLEANUP, 2);
-}
-
-class DomDepthCompareClosure : public CompareClosure<LoadNode*> {
- PhaseIdealLoop* _phase;
-
-public:
- DomDepthCompareClosure(PhaseIdealLoop* phase) : _phase(phase) { }
-
- int do_compare(LoadNode* const &n1, LoadNode* const &n2) {
- int d1 = _phase->dom_depth(_phase->get_ctrl(n1));
- int d2 = _phase->dom_depth(_phase->get_ctrl(n2));
- if (d1 == d2) {
- // Compare index if the depth is the same, ensures all entries are unique.
- return n1->_idx - n2->_idx;
- } else {
- return d2 - d1;
- }
- }
-};
-
-// Traverse graph and add all loadPs to list, sorted by dom depth
-void gather_loadnodes_sorted(PhaseIdealLoop* phase, GrowableArray<LoadNode*>* loadList) {
-
- VectorSet visited(Thread::current()->resource_area());
- GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
- DomDepthCompareClosure ddcc(phase);
-
- nodeStack.push(phase->C->root());
- while(nodeStack.length() > 0) {
- Node *n = nodeStack.pop();
- if (visited.test(n->_idx)) {
- continue;
- }
-
- if (n->isa_Load()) {
- LoadNode *load = n->as_Load();
- if (load_require_barrier(load)) {
- assert(phase->get_ctrl(load) != NULL, "sanity");
- assert(phase->dom_depth(phase->get_ctrl(load)) != 0, "sanity");
- loadList->insert_sorted(&ddcc, load);
- }
- }
-
- visited.set(n->_idx);
- for (uint i = 0; i < n->req(); i++) {
- if (n->in(i)) {
- if (!visited.test(n->in(i)->_idx)) {
- nodeStack.push(n->in(i));
- }
+ // Now at block top, see if we have any changes
+ new_live.SUBTRACT(old_live);
+ if (new_live.is_NotEmpty()) {
+ // Liveness has refined, update and propagate to prior blocks
+ old_live.OR(new_live);
+ for (uint i = 1; i < block->num_preds(); ++i) {
+ Block* const pred = cfg->get_block_for_node(block->pred(i));
+ worklist.push(pred);
}
}
}
}
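
The loop above is a standard backward liveness fixpoint: live-out of a block is the union of its successors' live-in; walking the block bottom-up removes definitions and adds uses; predecessors are re-queued only when a block's set grows. A self-contained sketch of the same dataflow on a toy CFG (plain std::bitset instead of RegMask):

#include <bitset>
#include <cstdio>
#include <vector>

static const int kRegs = 8;
typedef std::bitset<kRegs> RegSet;

struct ToyInstr {
  int def;                 // register defined, or -1
  std::vector<int> uses;   // registers used
};

struct ToyBlock {
  std::vector<ToyInstr> instrs;
  std::vector<int> succs;
  std::vector<int> preds;
};

int main() {
  // Two-block CFG: block 0 falls into block 1.
  std::vector<ToyBlock> cfg(2);
  cfg[0].instrs = {{0, {}}, {1, {0}}};   // r0 = ...; r1 = use(r0)
  cfg[1].instrs = {{2, {1}}};            // r2 = use(r1)
  cfg[0].succs = {1};
  cfg[1].preds = {0};

  std::vector<RegSet> live(cfg.size());  // live-in per block
  std::vector<int> worklist;
  for (int i = 0; i < (int)cfg.size(); i++) {
    worklist.push_back(i);
  }

  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();

    // Initialize to union of successors' live-in.
    RegSet new_live;
    for (size_t s = 0; s < cfg[b].succs.size(); s++) {
      new_live |= live[cfg[b].succs[s]];
    }

    // Walk the block backwards: kill defs, then add uses.
    for (int i = (int)cfg[b].instrs.size() - 1; i >= 0; i--) {
      const ToyInstr& in = cfg[b].instrs[i];
      if (in.def >= 0) {
        new_live.reset(in.def);
      }
      for (size_t u = 0; u < in.uses.size(); u++) {
        new_live.set(in.uses[u]);
      }
    }

    // Propagate to predecessors only if the block's live-in actually grew.
    RegSet grown = new_live & ~live[b];
    if (grown.any()) {
      live[b] |= grown;
      for (size_t p = 0; p < cfg[b].preds.size(); p++) {
        worklist.push_back(cfg[b].preds[p]);
      }
    }
  }

  printf("live-in(block 1) contains r1: %d\n", (int)live[1].test(1));
  return 0;
}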
-
-// Add LoadBarriers to all LoadPs
-void ZBarrierSetC2::insert_load_barriers(PhaseIdealLoop* phase) const {
-
- bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
- GrowableArray<LoadNode *> loadList(Thread::current()->resource_area(), 0, 0, NULL);
- gather_loadnodes_sorted(phase, &loadList);
-
- PhaseIterGVN &igvn = phase->igvn();
- int count = 0;
-
- for (GrowableArrayIterator<LoadNode *> loadIter = loadList.begin(); loadIter != loadList.end(); ++loadIter) {
- LoadNode *load = *loadIter;
-
- if (load_has_expanded_barrier(load)) {
- continue;
- }
-
- do {
- // Insert a barrier on a loadP
- // if another load is found that needs to be expanded first, retry on that one
- LoadNode* result = insert_one_loadbarrier(phase, load, phase->get_ctrl(load));
- while (result != NULL) {
- result = insert_one_loadbarrier(phase, result, phase->get_ctrl(result));
- }
- } while (!load_has_expanded_barrier(load));
- }
-
- phase->C->print_method(PHASE_INSERT_BARRIER, 2);
-}
-
-void push_antidependent_stores(PhaseIdealLoop* phase, Node_Stack& nodestack, LoadNode* start_load) {
- // push all stores on the same mem, that can_alias
- // Any load found must be handled first
- PhaseIterGVN &igvn = phase->igvn();
- int load_alias_idx = igvn.C->get_alias_index(start_load->adr_type());
-
- Node *mem = start_load->in(1);
- for (DUIterator_Fast imax, u = mem->fast_outs(imax); u < imax; u++) {
- Node *mem_use = mem->fast_out(u);
-
- if (mem_use == start_load) continue;
- if (!mem_use->is_Store()) continue;
- if (!phase->has_ctrl(mem_use)) continue;
- if (phase->get_ctrl(mem_use) != phase->get_ctrl(start_load)) continue;
-
- // add any aliasing store in this block
- StoreNode *store = mem_use->isa_Store();
- const TypePtr *adr_type = store->adr_type();
- if (igvn.C->can_alias(adr_type, load_alias_idx)) {
- nodestack.push(store, 0);
- }
- }
-}
-
-LoadNode* ZBarrierSetC2::insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* start_load, Node* ctrl) const {
- bool trace = phase->C->directive()->ZTraceLoadBarriersOption;
- PhaseIterGVN &igvn = phase->igvn();
-
- // Check for other loadPs at the same loop depth that is reachable by a DFS
- // - if found - return it. It needs to be inserted first
- // - otherwise proceed and insert barrier
-
- VectorSet visited(Thread::current()->resource_area());
- Node_Stack nodestack(100);
-
- nodestack.push(start_load, 0);
- push_antidependent_stores(phase, nodestack, start_load);
-
- while(!nodestack.is_empty()) {
- Node* n = nodestack.node(); // peek
- nodestack.pop();
- if (visited.test(n->_idx)) {
- continue;
- }
-
- if (n->is_Load() && n != start_load && load_require_barrier(n->as_Load()) && !load_has_expanded_barrier(n->as_Load())) {
- // Found another load that needs a barrier in the same block. Must expand later loads first.
- if (trace) tty->print_cr(" * Found LoadP %i on DFS", n->_idx);
- return n->as_Load(); // return node that should be expanded first
- }
-
- if (!phase->has_ctrl(n)) continue;
- if (phase->get_ctrl(n) != phase->get_ctrl(start_load)) continue;
- if (n->is_Phi()) continue;
-
- visited.set(n->_idx);
- // push all children
- for (DUIterator_Fast imax, ii = n->fast_outs(imax); ii < imax; ii++) {
- Node* c = n->fast_out(ii);
- if (c != NULL) {
- nodestack.push(c, 0);
- }
- }
- }
-
- insert_one_loadbarrier_inner(phase, start_load, ctrl, visited);
- return NULL;
-}
-
-void ZBarrierSetC2::insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited2) const {
- PhaseIterGVN &igvn = phase->igvn();
- Compile* C = igvn.C;
- bool trace = C->directive()->ZTraceLoadBarriersOption;
-
- // create barrier
- Node* barrier = new LoadBarrierNode(C, NULL, load->in(LoadNode::Memory), NULL, load->in(LoadNode::Address), load_has_weak_barrier(load));
- Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
- Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);
- ctrl = normalize_ctrl(ctrl);
-
- if (trace) tty->print_cr("Insert load %i with barrier: %i and ctrl : %i", load->_idx, barrier->_idx, ctrl->_idx);
-
- // Splice control
- // - insert barrier control diamond between loads ctrl and ctrl successor on path to block end.
- // - If control successor is a catch, step over to next.
- Node* ctrl_succ = NULL;
- for (DUIterator_Fast imax, j = ctrl->fast_outs(imax); j < imax; j++) {
- Node* tmp = ctrl->fast_out(j);
-
- // - CFG nodes is the ones we are going to splice (1 only!)
- // - Phi nodes will continue to hang from the region node!
- // - self loops should be skipped
- if (tmp->is_Phi() || tmp == ctrl) {
- continue;
- }
-
- if (tmp->is_CFG()) {
- assert(ctrl_succ == NULL, "There can be only one");
- ctrl_succ = tmp;
- continue;
- }
- }
-
- // Now splice control
- assert(ctrl_succ != load, "sanity");
- assert(ctrl_succ != NULL, "Broken IR");
- bool found = false;
- for(uint k = 0; k < ctrl_succ->req(); k++) {
- if (ctrl_succ->in(k) == ctrl) {
- assert(!found, "sanity");
- if (trace) tty->print_cr(" Move CFG ctrl_succ %i to barrier_ctrl", ctrl_succ->_idx);
- igvn.replace_input_of(ctrl_succ, k, barrier_ctrl);
- found = true;
- k--;
- }
- }
-
- // For all successors of ctrl - move all visited to become successors of barrier_ctrl instead
- for (DUIterator_Fast imax, r = ctrl->fast_outs(imax); r < imax; r++) {
- Node* tmp = ctrl->fast_out(r);
- if (tmp->is_SafePoint() || (visited2.test(tmp->_idx) && (tmp != load))) {
- if (trace) tty->print_cr(" Move ctrl_succ %i to barrier_ctrl", tmp->_idx);
- igvn.replace_input_of(tmp, 0, barrier_ctrl);
- --r; --imax;
- }
- }
-
- // Move the loads user to the barrier
- for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
- Node* u = load->fast_out(i);
- if (u->isa_LoadBarrier()) {
- continue;
- }
-
- // find correct input - replace with iterator?
- for(uint j = 0; j < u->req(); j++) {
- if (u->in(j) == load) {
- igvn.replace_input_of(u, j, barrier_val);
- --i; --imax; // Adjust the iterator of the *outer* loop
- break; // some nodes (calls) might have several uses from the same node
- }
- }
- }
-
- // Connect barrier to load and control
- barrier->set_req(LoadBarrierNode::Oop, load);
- barrier->set_req(LoadBarrierNode::Control, ctrl);
-
- igvn.replace_input_of(load, MemNode::Control, ctrl);
- load->pin();
-
- igvn.rehash_node_delayed(load);
- igvn.register_new_node_with_optimizer(barrier);
- igvn.register_new_node_with_optimizer(barrier_val);
- igvn.register_new_node_with_optimizer(barrier_ctrl);
- load_set_expanded_barrier(load);
-
- C->print_method(PHASE_INSERT_BARRIER, 3, load->_idx);
-}
-
-// The bad_mask in the ThreadLocalData shouldn't have an anti-dep-check.
-// The bad_mask address if of type TypeRawPtr, but that will alias
-// InitializeNodes until the type system is expanded.
-bool ZBarrierSetC2::needs_anti_dependence_check(const Node* node) const {
- MachNode* mnode = node->as_Mach();
- if (mnode != NULL) {
- intptr_t offset = 0;
- const TypePtr *adr_type2 = NULL;
- const Node* base = mnode->get_base_and_disp(offset, adr_type2);
- if ((base != NULL) &&
- (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_ThreadLocal) &&
- (offset == in_bytes(ZThreadLocalData::address_bad_mask_offset()))) {
- return false;
- }
- }
- return true;
-}
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -29,134 +29,38 @@
#include "opto/node.hpp"
#include "utilities/growableArray.hpp"
-class ZCompareAndSwapPNode : public CompareAndSwapPNode {
-public:
- ZCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
- virtual int Opcode() const;
-};
-
-class ZWeakCompareAndSwapPNode : public WeakCompareAndSwapPNode {
-public:
- ZWeakCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
- virtual int Opcode() const;
-};
+const uint8_t ZLoadBarrierStrong = 1;
+const uint8_t ZLoadBarrierWeak = 2;
+const uint8_t ZLoadBarrierElided = 3;
-class ZCompareAndExchangePNode : public CompareAndExchangePNode {
-public:
- ZCompareAndExchangePNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { }
- virtual int Opcode() const;
-};
+class ZLoadBarrierStubC2 : public ResourceObj {
+private:
+ const MachNode* _node;
+ const Address _ref_addr;
+ const Register _ref;
+ const Register _tmp;
+ const bool _weak;
+ Label _entry;
+ Label _continuation;
-class ZGetAndSetPNode : public GetAndSetPNode {
-public:
- ZGetAndSetPNode(Node* c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t) : GetAndSetPNode(c, mem, adr, val, at, t) { }
- virtual int Opcode() const;
-};
-
-class LoadBarrierNode : public MultiNode {
-private:
- bool _weak; // On strong or weak oop reference
- static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
- void push_dominated_barriers(PhaseIterGVN* igvn) const;
+ ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak);
public:
- enum {
- Control,
- Memory,
- Oop,
- Address,
- Number_of_Outputs = Address,
- Similar,
- Number_of_Inputs
- };
-
- LoadBarrierNode(Compile* C,
- Node* c,
- Node* mem,
- Node* val,
- Node* adr,
- bool weak);
-
- virtual int Opcode() const;
- virtual uint size_of() const;
- virtual bool cmp(const Node& n) const;
- virtual const Type *bottom_type() const;
- virtual const TypePtr* adr_type() const;
- virtual const Type *Value(PhaseGVN *phase) const;
- virtual Node *Identity(PhaseGVN *phase);
- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
- virtual uint match_edge(uint idx) const;
-
- LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
- bool linear_only,
- bool look_for_similar);
-
- void fix_similar_in_uses(PhaseIterGVN* igvn);
-
- bool has_true_uses() const;
-
- bool can_be_eliminated() const {
- return !in(Similar)->is_top();
- }
-
- bool is_weak() const {
- return _weak;
- }
-};
+ static ZLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak);
-class LoadBarrierSlowRegNode : public TypeNode {
-private:
- bool _is_weak;
-public:
- LoadBarrierSlowRegNode(Node *c,
- Node *adr,
- Node *src,
- const TypePtr* t,
- bool weak) :
- TypeNode(t, 3), _is_weak(weak) {
- init_req(1, adr);
- init_req(2, src);
- init_class_id(Class_LoadBarrierSlowReg);
- }
-
- virtual uint size_of() const {
- return sizeof(*this);
- }
-
- virtual const char * name() {
- return "LoadBarrierSlowRegNode";
- }
-
- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
- return NULL;
- }
-
- virtual int Opcode() const;
-
- bool is_weak() { return _is_weak; }
-};
-
-class ZBarrierSetC2State : public ResourceObj {
-private:
- // List of load barrier nodes which need to be expanded before matching
- GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
-
-public:
- ZBarrierSetC2State(Arena* comp_arena);
- int load_barrier_count() const;
- void add_load_barrier_node(LoadBarrierNode* n);
- void remove_load_barrier_node(LoadBarrierNode* n);
- LoadBarrierNode* load_barrier_node(int idx) const;
+ Address ref_addr() const;
+ Register ref() const;
+ Register tmp() const;
+ address slow_path() const;
+ RegMask& live() const;
+ Label* entry();
+ Label* continuation();
};
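
ZLoadBarrierStubC2 carries an entry label (branched to from the fast-path check) and a continuation label (jumped back to from the slow path), with create() recording stubs for a later emit pass. A toy model of that create/emit split, not the HotSpot API:

#include <cstdio>
#include <string>
#include <vector>

struct ToyStub {
  std::string name;
  int entry_label;
  int continuation_label;
};

struct ToyStubList {
  std::vector<ToyStub> stubs;
  int next_label = 0;

  // Mirrors the create() + deferred emit split: creation only records the
  // stub, emission happens once after all fast paths are laid out.
  ToyStub& create(const std::string& name) {
    stubs.push_back({name, next_label++, next_label++});
    return stubs.back();
  }

  void emit_all() const {
    for (const ToyStub& s : stubs) {
      printf("L%d: ; slow path for %s\n", s.entry_label, s.name.c_str());
      printf("  jmp L%d ; back to fast path\n", s.continuation_label);
    }
  }
};

int main() {
  ToyStubList list;
  ToyStub& stub = list.create("loadP [r10 + 16]");
  printf("  test ..., bad_mask ; fast path check\n");
  printf("  jnz  L%d\n", stub.entry_label);
  printf("L%d: ; continuation\n", stub.continuation_label);
  list.emit_all();
  return 0;
}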
class ZBarrierSetC2 : public BarrierSetC2 {
private:
- ZBarrierSetC2State* state() const;
- void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
-
-#ifdef ASSERT
- void verify_gc_barriers(bool post_parse) const;
-#endif
+ void compute_liveness_at_stubs() const;
+ void analyze_dominating_barriers() const;
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
@@ -174,43 +78,14 @@
public:
virtual void* create_barrier_state(Arena* comp_arena) const;
-
- virtual bool has_load_barriers() const { return true; }
- virtual bool is_gc_barrier_node(Node* node) const;
- virtual Node* step_over_gc_barrier(Node* c) const;
- virtual Node* step_over_gc_barrier_ctrl(Node* c) const;
-
- virtual void register_potential_barrier_node(Node* node) const;
- virtual void unregister_potential_barrier_node(Node* node) const;
- virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
- virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
- virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
-
- virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const;
+ virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc,
+ BasicType type,
+ bool is_clone,
+ ArrayCopyPhase phase) const;
- virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const;
- virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;
- virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
- virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
- virtual bool needs_anti_dependence_check(const Node* node) const;
-
-#ifdef ASSERT
- virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
-#endif
-
- // Load barrier insertion and expansion external
- virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const;
- virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const;
- virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return (mode == LoopOptsZBarrierInsertion); }
- virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return mode == LoopOptsZBarrierInsertion; }
-
-private:
- // Load barrier insertion and expansion internal
- void insert_barriers_on_unsafe(PhaseIdealLoop* phase) const;
- void clean_catch_blocks(PhaseIdealLoop* phase, bool verify = false) const;
- void insert_load_barriers(PhaseIdealLoop* phase) const;
- LoadNode* insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) const;
- void insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited) const;
+ virtual void late_barrier_analysis() const;
+ virtual int estimate_stub_size() const;
+ virtual void emit_stubs(CodeBuffer& cb) const;
};
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
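
This hunk does not show the body of late_barrier_analysis(); given the two private analyses declared above, a plausible driver would look roughly like the following (sketch only, not taken from the patch):

// Sketch only -- assumes late_barrier_analysis() is the post-matching hook
// that simply runs the two private analyses declared in the class above.
void ZBarrierSetC2::late_barrier_analysis() const {
  analyze_dominating_barriers();   // elide strong barriers made redundant by a dominating access
  compute_liveness_at_stubs();     // record live registers for stub spilling
}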
--- a/src/hotspot/share/gc/z/zArguments.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/zArguments.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -37,11 +37,6 @@
void ZArguments::initialize() {
GCArguments::initialize();
- // Check max heap size
- if (MaxHeapSize > ZMaxHeapSize) {
- vm_exit_during_initialization("Java heap too large");
- }
-
// Enable NUMA by default
if (FLAG_IS_DEFAULT(UseNUMA)) {
FLAG_SET_DEFAULT(UseNUMA, true);
--- a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,7 @@
#ifndef SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
#define SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
-#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
-#include "oops/accessDecorators.hpp"
-#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
class ZBarrierSetAssemblerBase : public BarrierSetAssembler {
--- a/src/hotspot/share/gc/z/zGlobals.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/zGlobals.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -44,10 +44,6 @@
const size_t ZGranuleSizeShift = ZPlatformGranuleSizeShift;
const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift;
-// Max heap size shift/size
-const size_t ZMaxHeapSizeShift = ZPlatformMaxHeapSizeShift;
-const size_t ZMaxHeapSize = (size_t)1 << ZMaxHeapSizeShift;
-
// Page types
const uint8_t ZPageTypeSmall = 0;
const uint8_t ZPageTypeMedium = 1;
--- a/src/hotspot/share/gc/z/zNMethod.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/zNMethod.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -294,13 +294,14 @@
return;
}
- ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
-
if (nm->is_unloading()) {
+ ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
unlink(nm);
return;
}
+ ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+
// Heal oops and disarm
ZNMethodOopClosure cl;
ZNMethod::nmethod_oops_do(nm, &cl);
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -31,6 +31,13 @@
_manager(),
_initialized(false) {
+ // Check max supported heap size
+ if (max_capacity > ZAddressOffsetMax) {
+ log_error(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)",
+ ZAddressOffsetMax / G);
+ return;
+ }
+
log_info(gc, init)("Address Space: " SIZE_FORMAT "T", ZAddressOffsetMax / K / G);
// Reserve address space
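
The new early-out rejects heap sizes that the address-space layout cannot cover and reports the limit in gigabytes. A standalone sketch of the same guard; the 16TB figure below is only an example value, not necessarily ZAddressOffsetMax on any platform:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t G = 1024ull * 1024ull * 1024ull;
  const uint64_t address_offset_max = 16ull * 1024ull * G;  // example limit: 16TB
  const uint64_t max_capacity = 32ull * 1024ull * G;        // requested: 32TB

  if (max_capacity > address_offset_max) {
    printf("Java heap too large (max supported heap size is %lluG)\n",
           (unsigned long long)(address_offset_max / G));
    return 1;
  }
  return 0;
}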
--- a/src/hotspot/share/include/jmm.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/include/jmm.h Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,9 @@
JMM_VERSION_1_2 = 0x20010200, // JDK 7
JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA
JMM_VERSION_1_2_2 = 0x20010202,
- JMM_VERSION_2 = 0x20020000, // JDK 10
- JMM_VERSION = 0x20020000
+ JMM_VERSION_2 = 0x20020000, // JDK 10
+ JMM_VERSION_3 = 0x20030000, // JDK 14
+ JMM_VERSION = JMM_VERSION_3
};
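
A quick check of the apparent encoding of the constants above (a 0x20 marker byte followed by major/minor/micro bytes), illustrative only:

#include <cstdio>

static void decode(unsigned int v) {
  printf("0x%08x -> major %u, minor %u, micro %u\n",
         v, (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
}

int main() {
  decode(0x20010201);  // JMM_VERSION_1_2_1 -> 1.2.1
  decode(0x20020000);  // JMM_VERSION_2     -> 2.0.0
  decode(0x20030000);  // JMM_VERSION_3     -> 3.0.0
  return 0;
}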
typedef struct {
@@ -239,6 +240,9 @@
jobject (JNICALL *GetMemoryPoolUsage) (JNIEnv* env, jobject pool);
jobject (JNICALL *GetPeakMemoryPoolUsage) (JNIEnv* env, jobject pool);
+ jlong (JNICALL *GetOneThreadAllocatedMemory)
+ (JNIEnv *env,
+ jlong thread_id);
void (JNICALL *GetThreadAllocatedMemory)
(JNIEnv *env,
jlongArray ids,
--- a/src/hotspot/share/include/jvm.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/include/jvm.h Fri Oct 11 12:08:01 2019 +0530
@@ -1044,19 +1044,6 @@
#include "classfile_constants.h"
/*
- * A function defined by the byte-code verifier and called by the VM.
- * This is not a function implemented in the VM.
- *
- * Returns JNI_FALSE if verification fails. A detailed error message
- * will be places in msg_buf, whose length is specified by buf_len.
- */
-typedef jboolean (*verifier_fn_t)(JNIEnv *env,
- jclass cb,
- char * msg_buf,
- jint buf_len);
-
-
-/*
* Support for a VM-independent class format checker.
*/
typedef struct {
@@ -1086,28 +1073,6 @@
typedef jstring (*to_java_string_fn_t)(JNIEnv *env, char *str);
-typedef char *(*to_c_string_fn_t)(JNIEnv *env, jstring s, jboolean *b);
-
-/* This is the function defined in libjava.so that performs class
- * format checks. This functions fills in size information about
- * the class file and returns:
- *
- * 0: good
- * -1: out of memory
- * -2: bad format
- * -3: unsupported version
- * -4: bad class name
- */
-
-typedef jint (*check_format_fn_t)(char *class_name,
- unsigned char *data,
- unsigned int data_size,
- class_size_info *class_size,
- char *message_buffer,
- jint buffer_length,
- jboolean measure_only,
- jboolean check_relaxed);
-
#define JVM_RECOGNIZED_CLASS_MODIFIERS (JVM_ACC_PUBLIC | \
JVM_ACC_FINAL | \
JVM_ACC_SUPER | \
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -28,6 +28,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
@@ -52,7 +53,6 @@
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
--- a/src/hotspot/share/interpreter/linkResolver.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/interpreter/linkResolver.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -30,6 +30,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bootstrapInfo.hpp"
@@ -48,7 +49,6 @@
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -168,7 +168,7 @@
static bool is_cds_dump_requested() {
// we will not be able to launch recordings if a cds dump is being requested
- if ((DumpSharedSpaces || DynamicDumpSharedSpaces) && (JfrOptionSet::startup_recording_options() != NULL)) {
+ if (Arguments::is_dumping_archive() && (JfrOptionSet::startup_recording_options() != NULL)) {
warning("JFR will be disabled during CDS dumping");
teardown_startup_support();
return true;
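
The call sites in this and the following files replace the repeated flag disjunction with Arguments::is_dumping_archive(). A toy sketch of what such a helper is assumed to reduce to (the real one lives in runtime/arguments and is not shown in this patch):

#include <cstdio>

static bool DumpSharedSpaces = false;
static bool DynamicDumpSharedSpaces = true;

static bool is_dumping_archive() {
  return DumpSharedSpaces || DynamicDumpSharedSpaces;
}

int main() {
  if (is_dumping_archive()) {
    printf("JFR will be disabled during CDS dumping\n");
  }
  return 0;
}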
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -393,6 +393,10 @@
Service_lock->unlock();
}
+ if (UseNotificationThread && Notification_lock->owned_by_self()) {
+ Notification_lock->unlock();
+ }
+
if (CodeCache_lock->owned_by_self()) {
CodeCache_lock->unlock();
}
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -25,11 +25,11 @@
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -31,6 +31,7 @@
#include "jvmci/vmStructs_jvmci.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/resourceHash.hpp"
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -44,6 +44,7 @@
_failure_reason_on_C_heap(false) {
// Get Jvmti capabilities under lock to get consistent values.
MutexLocker mu(JvmtiThreadState_lock);
+ _jvmti_redefinition_count = JvmtiExport::redefinition_count();
_jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint() ? 1 : 0;
_jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables() ? 1 : 0;
_jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions() ? 1 : 0;
@@ -51,6 +52,10 @@
}
bool JVMCICompileState::jvmti_state_changed() const {
+ // Some classes were redefined
+ if (jvmti_redefinition_count() != JvmtiExport::redefinition_count()) {
+ return true;
+ }
if (!jvmti_can_access_local_variables() &&
JvmtiExport::can_access_local_variables()) {
return true;
--- a/src/hotspot/share/jvmci/jvmciEnv.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/jvmci/jvmciEnv.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -94,6 +94,7 @@
// Cache JVMTI state. Defined as bytes so that reading them from Java
// via Unsafe is well defined (the C++ type for bool is implementation
// defined and may not be the same as a Java boolean).
+ uint64_t _jvmti_redefinition_count;
jbyte _jvmti_can_hotswap_or_post_breakpoint;
jbyte _jvmti_can_access_local_variables;
jbyte _jvmti_can_post_on_exceptions;
@@ -113,6 +114,7 @@
CompileTask* task() { return _task; }
bool jvmti_state_changed() const;
+ uint64_t jvmti_redefinition_count() const { return _jvmti_redefinition_count; }
bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint != 0; }
bool jvmti_can_access_local_variables() const { return _jvmti_can_access_local_variables != 0; }
bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions != 0; }
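A minimal illustration (not part of the changeset) of the snapshot-and-compare pattern the two jvmciEnv hunks above introduce: the redefinition count is captured once, under JvmtiThreadState_lock, when the compile state is created, and any later difference from the live counter reports the JVMTI state as changed. The counter below is a hypothetical stand-in for JvmtiExport::redefinition_count().

#include <cstdint>
#include <cstdio>

static uint64_t g_redefinition_count = 0;              // stand-in for the VM-global counter
static uint64_t current_redefinition_count() { return g_redefinition_count; }

struct CompileStateSnapshot {
  uint64_t redefinition_count;                          // captured when compilation starts

  CompileStateSnapshot() : redefinition_count(current_redefinition_count()) {}

  // Mirrors the new jvmti_state_changed() check: any class redefinition since
  // the snapshot invalidates the compilation.
  bool jvmti_state_changed() const {
    return redefinition_count != current_redefinition_count();
  }
};

int main() {
  CompileStateSnapshot state;
  std::printf("%d\n", state.jvmti_state_changed());     // 0: nothing redefined yet
  ++g_redefinition_count;                               // a RedefineClasses happened meanwhile
  std::printf("%d\n", state.jvmti_state_changed());     // 1: the compile must be discarded
}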
--- a/src/hotspot/share/logging/logSelectionList.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/logging/logSelectionList.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/memory/filemap.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/memory/filemap.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -263,7 +263,7 @@
void SharedClassPathEntry::init(bool is_modules_image,
ClassPathEntry* cpe, TRAPS) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
_timestamp = 0;
_filesize = 0;
_from_class_path_attr = false;
@@ -397,7 +397,7 @@
}
void FileMapInfo::allocate_shared_path_table() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Sanity");
+ Arguments::assert_is_dumping_archive();
EXCEPTION_MARK; // The following calls should never throw, but would exit VM on error.
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@@ -444,7 +444,7 @@
}
void FileMapInfo::check_nonempty_dir_in_shared_path_table() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
bool has_nonempty_dir = false;
@@ -471,7 +471,7 @@
}
void FileMapInfo::record_non_existent_class_path_entry(const char* path) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
log_info(class, path)("non-existent Class-Path entry %s", path);
if (_non_existent_class_paths == NULL) {
_non_existent_class_paths = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<const char*>(10, true);
@@ -480,7 +480,7 @@
}
int FileMapInfo::num_non_existent_class_paths() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
if (_non_existent_class_paths != NULL) {
return _non_existent_class_paths->length();
} else {
@@ -1150,7 +1150,7 @@
void FileMapInfo::write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "Dump time only");
+ Arguments::assert_is_dumping_archive();
FileMapRegion* si = space_at(region);
char* target_base = base;
--- a/src/hotspot/share/memory/metaspaceShared.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/memory/metaspaceShared.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -425,7 +425,7 @@
}
void MetaspaceShared::commit_shared_space_to(char* newtop) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
+ Arguments::assert_is_dumping_archive();
char* base = _shared_rs.base();
size_t need_committed_size = newtop - base;
size_t has_committed_size = _shared_vs.committed_size();
@@ -509,8 +509,7 @@
}
uintx MetaspaceShared::object_delta_uintx(void* obj) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
- "supported only for dumping");
+ Arguments::assert_is_dumping_archive();
if (DumpSharedSpaces) {
assert(shared_rs()->contains(obj), "must be");
} else {
--- a/src/hotspot/share/memory/universe.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/memory/universe.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -710,7 +710,7 @@
}
#if INCLUDE_CDS
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
MetaspaceShared::prepare_for_dumping();
}
#endif
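The CDS-related hunks above (filemap.cpp, metaspaceShared.cpp, universe.cpp) and several oops/ hunks below replace the repeated DumpSharedSpaces || DynamicDumpSharedSpaces tests with two Arguments helpers. A minimal sketch of what those helpers are assumed to wrap; the real definitions live in runtime/arguments.hpp, which is outside this excerpt.

#include <cassert>

// Placeholder flags standing in for the -Xshare:dump / -XX:ArchiveClassesAtExit state.
static bool DumpSharedSpaces        = false;   // static archive dump requested
static bool DynamicDumpSharedSpaces = false;   // dynamic archive dump requested

struct Arguments {
  // True while the VM is dumping either kind of CDS archive.
  static bool is_dumping_archive() {
    return DumpSharedSpaces || DynamicDumpSharedSpaces;
  }
  // Replacement for the hand-written "dump time only" asserts at the call sites.
  static void assert_is_dumping_archive() {
    assert(is_dumping_archive() && "dump time only");
  }
};

int main() {
  DumpSharedSpaces = true;                     // e.g. running -Xshare:dump
  Arguments::assert_is_dumping_archive();      // passes; would abort otherwise
}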
--- a/src/hotspot/share/oops/constMethod.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/constMethod.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#define SHARE_OOPS_CONSTMETHOD_HPP
#include "oops/oop.hpp"
+#include "runtime/arguments.hpp"
#include "utilities/align.hpp"
// An ConstMethod represents portions of a Java method which are not written to after
@@ -293,7 +294,7 @@
_adapter = adapter;
}
void set_adapter_trampoline(AdapterHandlerEntry** trampoline) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "must be");
+ Arguments::assert_is_dumping_archive();
if (DumpSharedSpaces) {
assert(*trampoline == NULL,
"must be NULL during dump time, to be initialized at run time");
--- a/src/hotspot/share/oops/cpCache.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/cpCache.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -708,7 +708,7 @@
}
void ConstantPoolCache::walk_entries_for_initialization(bool check_only) {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "sanity");
+ Arguments::assert_is_dumping_archive();
// When dumping the archive, we want to clean up the ConstantPoolCache
// to remove any effect of linking due to the execution of Java code --
// each ConstantPoolCacheEntry will have the same contents as if
--- a/src/hotspot/share/oops/instanceKlass.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/instanceKlass.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -453,7 +453,7 @@
assert(is_instance_klass(), "is layout incorrect?");
assert(size_helper() == parser.layout_size(), "incorrect size_helper?");
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
SystemDictionaryShared::init_dumptime_info(this);
}
}
@@ -603,7 +603,7 @@
}
set_annotations(NULL);
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
SystemDictionaryShared::remove_dumptime_info(this);
}
}
@@ -1101,7 +1101,7 @@
void InstanceKlass::set_implementor(Klass* k) {
- assert_lock_strong(Compile_lock);
+ assert_locked_or_safepoint(Compile_lock);
assert(is_interface(), "not interface");
Klass* volatile* addr = adr_implementor();
assert(addr != NULL, "null addr");
@@ -2229,7 +2229,7 @@
// (1) We are running AOT to generate a shared library.
return true;
}
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
// (2) We are running -Xshare:dump or -XX:ArchiveClassesAtExit to create a shared archive
return true;
}
@@ -2333,8 +2333,8 @@
// being added to class hierarchy (see SystemDictionary:::add_to_hierarchy()).
_init_state = allocated;
- {
- MutexLocker ml(Compile_lock);
+ { // Otherwise this needs to take out the Compile_lock.
+ assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
init_implementor();
}
@@ -2477,7 +2477,7 @@
// notify ClassLoadingService of class unload
ClassLoadingService::notify_class_unloaded(ik);
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
SystemDictionaryShared::remove_dumptime_info(ik);
}
--- a/src/hotspot/share/oops/instanceKlass.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/instanceKlass.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -992,7 +992,6 @@
void process_interfaces(Thread *thread);
// virtual operations from Klass
- bool is_leaf_class() const { return _subklass == NULL; }
GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots,
Array<InstanceKlass*>* transitive_interfaces);
bool can_be_primary_super_slow() const;
--- a/src/hotspot/share/oops/klass.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/klass.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -57,10 +57,6 @@
_java_mirror = class_loader_data()->add_handle(m);
}
-oop Klass::java_mirror() const {
- return _java_mirror.resolve();
-}
-
oop Klass::java_mirror_no_keepalive() const {
return _java_mirror.peek();
}
@@ -525,7 +521,7 @@
}
void Klass::remove_unshareable_info() {
- assert (DumpSharedSpaces || DynamicDumpSharedSpaces,
+ assert (Arguments::is_dumping_archive(),
"only called during CDS dump time");
JFR_ONLY(REMOVE_ID(this);)
if (log_is_enabled(Trace, cds, unshareable)) {
@@ -543,7 +539,7 @@
}
void Klass::remove_java_mirror() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only called during CDS dump time");
+ Arguments::assert_is_dumping_archive();
if (log_is_enabled(Trace, cds, unshareable)) {
ResourceMark rm;
log_trace(cds, unshareable)("remove java_mirror: %s", external_name());
@@ -681,8 +677,6 @@
}
}
-oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
-
// In product mode, this function doesn't have virtual function calls so
// there might be some performance advantage to handling InstanceKlass here.
const char* Klass::external_name() const {
@@ -826,14 +820,6 @@
return ClassLoaderDataGraph::is_valid(k->class_loader_data());
}
-klassVtable Klass::vtable() const {
- return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
-}
-
-vtableEntry* Klass::start_of_vtable() const {
- return (vtableEntry*) ((address)this + in_bytes(vtable_start_offset()));
-}
-
Method* Klass::method_at_vtable(int index) {
#ifndef PRODUCT
assert(index >= 0, "valid vtable index");
@@ -844,9 +830,6 @@
return start_of_vtable()[index].method();
}
-ByteSize Klass::vtable_start_offset() {
- return in_ByteSize(InstanceKlass::header_size() * wordSize);
-}
#ifndef PRODUCT
--- a/src/hotspot/share/oops/klass.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/klass.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -469,8 +469,6 @@
virtual bool should_be_initialized() const { return false; }
// initializes the klass
virtual void initialize(TRAPS);
- // lookup operation for MethodLookupCache
- friend class MethodLookupCache;
virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
virtual Method* uncached_lookup_method(const Symbol* name, const Symbol* signature,
OverpassLookupMode overpass_mode,
@@ -537,9 +535,6 @@
}
public:
- // subclass accessor (here for convenience; undefined for non-klass objects)
- virtual bool is_leaf_class() const { fatal("not a class"); return false; }
- public:
// ALL FUNCTIONS BELOW THIS POINT ARE DISPATCHED FROM AN OOP
// These functions describe behavior for the oop not the KLASS.
--- a/src/hotspot/share/oops/klass.inline.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/klass.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -25,13 +25,35 @@
#ifndef SHARE_OOPS_KLASS_INLINE_HPP
#define SHARE_OOPS_KLASS_INLINE_HPP
+#include "classfile/classLoaderData.inline.hpp"
#include "oops/compressedOops.hpp"
#include "oops/klass.hpp"
#include "oops/markWord.hpp"
+#include "oops/oopHandle.inline.hpp"
inline void Klass::set_prototype_header(markWord header) {
assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
_prototype_header = header;
}
+inline oop Klass::java_mirror() const {
+ return _java_mirror.resolve();
+}
+
+inline klassVtable Klass::vtable() const {
+ return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
+}
+
+inline oop Klass::class_loader() const {
+ return class_loader_data()->class_loader();
+}
+
+inline vtableEntry* Klass::start_of_vtable() const {
+ return (vtableEntry*) ((address)this + in_bytes(vtable_start_offset()));
+}
+
+inline ByteSize Klass::vtable_start_offset() {
+ return in_ByteSize(InstanceKlass::header_size() * wordSize);
+}
+
#endif // SHARE_OOPS_KLASS_INLINE_HPP
--- a/src/hotspot/share/oops/klassVtable.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/klassVtable.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -128,11 +128,6 @@
*vtable_length_ret = vtable_length;
}
-int klassVtable::index_of(Method* m, int len) const {
- assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
- return m->vtable_index();
-}
-
// Copy super class's vtable to the first part (prefix) of this class's vtable,
// and return the number of entries copied. Expects that 'super' is the Java
// super class (arrays can have "array" super classes that must be skipped).
@@ -169,7 +164,6 @@
// Note: Arrays can have intermediate array supers. Use java_super to skip them.
InstanceKlass* super = _klass->java_super();
- int nofNewEntries = 0;
bool is_shared = _klass->is_shared();
@@ -1029,15 +1023,6 @@
}
#endif // INCLUDE_JVMTI
-// CDS/RedefineClasses support - clear vtables so they can be reinitialized
-void klassVtable::clear_vtable() {
- for (int i = 0; i < _length; i++) table()[i].clear();
-}
-
-bool klassVtable::is_initialized() {
- return _length == 0 || table()[0].method() != NULL;
-}
-
//-----------------------------------------------------------------------------------------
// Itable code
@@ -1468,31 +1453,6 @@
#endif
}
-
-// inverse to itable_index
-Method* klassItable::method_for_itable_index(InstanceKlass* intf, int itable_index) {
- assert(intf->is_interface(), "sanity check");
- assert(intf->verify_itable_index(itable_index), "");
- Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
-
- if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
- return NULL; // help caller defend against bad indices
-
- int index = itable_index;
- Method* m = methods->at(index);
- int index2 = -1;
- while (!m->has_itable_index() ||
- (index2 = m->itable_index()) != itable_index) {
- assert(index2 < itable_index, "monotonic");
- if (++index == methods->length())
- return NULL;
- m = methods->at(index);
- }
- assert(m->itable_index() == itable_index, "correct inverse");
-
- return m;
-}
-
void klassVtable::verify(outputStream* st, bool forced) {
// make sure table is initialized
if (!Universe::is_fully_initialized()) return;
--- a/src/hotspot/share/oops/klassVtable.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/klassVtable.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -48,13 +48,6 @@
int _verify_count; // to make verify faster
#endif
- // Ordering important, so greater_than (>) can be used as an merge operator.
- enum AccessType {
- acc_private = 0,
- acc_package_private = 1,
- acc_publicprotected = 2
- };
-
public:
klassVtable(Klass* klass, void* base, int length) : _klass(klass) {
_tableOffset = (address)base - (address)klass; _length = length;
@@ -66,22 +59,12 @@
int length() const { return _length; }
inline Method* method_at(int i) const;
inline Method* unchecked_method_at(int i) const;
- inline Method** adr_method_at(int i) const;
// searching; all methods return -1 if not found
- int index_of(Method* m) const { return index_of(m, _length); }
int index_of_miranda(Symbol* name, Symbol* signature);
void initialize_vtable(bool checkconstraints, TRAPS); // initialize vtable of a new klass
- // CDS/RedefineClasses support - clear vtables so they can be reinitialized
- // at dump time. Clearing gives us an easy way to tell if the vtable has
- // already been reinitialized at dump time (see dump.cpp). Vtables can
- // be initialized at run time by RedefineClasses so dumping the right order
- // is necessary.
- void clear_vtable();
- bool is_initialized();
-
// computes vtable length (in words) and the number of miranda methods
static void compute_vtable_size_and_num_mirandas(int* vtable_length,
int* num_new_mirandas,
@@ -125,7 +108,6 @@
private:
void copy_vtable_to(vtableEntry* start);
int initialize_from_super(Klass* super);
- int index_of(Method* m, int len) const; // same as index_of, but search only up to len
void put_method_at(Method* m, int index);
static bool needs_new_vtable_entry(const methodHandle& m,
const Klass* super,
@@ -223,12 +205,6 @@
return table()[i].method();
}
-inline Method** klassVtable::adr_method_at(int i) const {
- // Allow one past the last entry to be referenced; useful for loop bounds.
- assert(i >= 0 && i <= _length, "index out of bounds");
- return (Method**)(address(table() + i) + vtableEntry::method_offset_in_bytes());
-}
-
// --------------------------------------------------------------------------------
class klassItable;
class itableMethodEntry;
@@ -333,9 +309,6 @@
static int compute_itable_size(Array<InstanceKlass*>* transitive_interfaces);
static void setup_itable_offset_table(InstanceKlass* klass);
- // Resolving of method to index
- static Method* method_for_itable_index(InstanceKlass* klass, int itable_index);
-
// Debugging/Statistics
static void print_statistics() PRODUCT_RETURN;
private:
--- a/src/hotspot/share/oops/method.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/method.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -28,6 +28,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodeTracer.hpp"
@@ -54,7 +55,6 @@
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@@ -979,7 +979,7 @@
void Method::unlink_method() {
_code = NULL;
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump time only");
+ Arguments::assert_is_dumping_archive();
// Set the values to what they should be at run time. Note that
// this Method can no longer be executed during dump time.
_i2i_entry = Interpreter::entry_for_cds_method(this);
--- a/src/hotspot/share/oops/methodData.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/methodData.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -34,7 +35,6 @@
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
@@ -1212,7 +1212,7 @@
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method, int size, TRAPS)
- : _extra_data_lock(Monitor::leaf, "MDO extra data lock"),
+ : _extra_data_lock(Mutex::leaf, "MDO extra data lock"),
_parameters_type_data_di(parameters_uninitialized) {
// Set the method back-pointer.
_method = method();
--- a/src/hotspot/share/oops/methodData.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/oops/methodData.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -2011,7 +2011,7 @@
MethodData(const methodHandle& method, int size, TRAPS);
public:
static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
- MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
+ MethodData() : _extra_data_lock(Mutex::leaf, "MDO extra data lock") {}; // For ciMethodData
bool is_methodData() const volatile { return true; }
void initialize();
--- a/src/hotspot/share/opto/buildOopMap.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/buildOopMap.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -352,7 +352,6 @@
} else {
// Other - some reaching non-oop value
- omap->set_value( r);
#ifdef ASSERT
if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) {
def->dump();
@@ -377,11 +376,18 @@
#endif
#ifdef ASSERT
- for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) {
+ for( OopMapStream oms1(omap); !oms1.is_done(); oms1.next()) {
OopMapValue omv1 = oms1.current();
+ if (omv1.type() != OopMapValue::derived_oop_value) {
+ continue;
+ }
bool found = false;
- for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) {
- if( omv1.content_reg() == oms2.current().reg() ) {
+ for( OopMapStream oms2(omap); !oms2.is_done(); oms2.next()) {
+ OopMapValue omv2 = oms2.current();
+ if (omv2.type() != OopMapValue::oop_value) {
+ continue;
+ }
+ if( omv1.content_reg() == omv2.reg() ) {
found = true;
break;
}
--- a/src/hotspot/share/opto/c2compiler.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/c2compiler.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -102,7 +102,8 @@
assert(is_initialized(), "Compiler thread must be initialized");
bool subsume_loads = SubsumeLoads;
- bool do_escape_analysis = DoEscapeAnalysis && !env->should_retain_local_variables();
+ bool do_escape_analysis = DoEscapeAnalysis && !env->should_retain_local_variables()
+ && !env->jvmti_can_get_owned_monitor_info();
bool eliminate_boxing = EliminateAutoBox;
while (!env->failing()) {
@@ -462,6 +463,11 @@
case vmIntrinsics::_writebackPostSync0:
if (!Matcher::match_rule_supported(Op_CacheWBPostSync)) return false;
break;
+ case vmIntrinsics::_rint:
+ case vmIntrinsics::_ceil:
+ case vmIntrinsics::_floor:
+ if (!Matcher::match_rule_supported(Op_RoundDoubleMode)) return false;
+ break;
case vmIntrinsics::_hashCode:
case vmIntrinsics::_identityHashCode:
case vmIntrinsics::_getClass:
--- a/src/hotspot/share/opto/callnode.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/callnode.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1397,6 +1397,18 @@
_is_allocation_MemBar_redundant = true;
}
}
+Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
+ Node* mark_node = NULL;
+ // For now only enable fast locking for non-array types
+ if (UseBiasedLocking && Opcode() == Op_Allocate) {
+ Node* klass_node = in(AllocateNode::KlassNode);
+ Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
+ mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+ } else {
+ mark_node = phase->MakeConX(markWord::prototype().value());
+ }
+ return mark_node;
+}
//=============================================================================
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
--- a/src/hotspot/share/opto/callnode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/callnode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -936,6 +936,8 @@
// allocation node.
void compute_MemBar_redundancy(ciMethod* initializer);
bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
+
+ Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
};
//------------------------------AllocateArray---------------------------------
--- a/src/hotspot/share/opto/classes.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/classes.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -48,9 +48,6 @@
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ZGC
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif
--- a/src/hotspot/share/opto/classes.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/classes.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -193,17 +193,6 @@
macro(LoadN)
macro(LoadRange)
macro(LoadS)
-#if INCLUDE_ZGC
-#define zgcmacro(x) macro(x)
-#else
-#define zgcmacro(x) optionalmacro(x)
-#endif
-zgcmacro(LoadBarrier)
-zgcmacro(LoadBarrierSlowReg)
-zgcmacro(ZCompareAndSwapP)
-zgcmacro(ZWeakCompareAndSwapP)
-zgcmacro(ZCompareAndExchangeP)
-zgcmacro(ZGetAndSetP)
macro(Lock)
macro(Loop)
macro(LoopLimit)
@@ -274,6 +263,8 @@
macro(Return)
macro(Root)
macro(RoundDouble)
+macro(RoundDoubleMode)
+macro(RoundDoubleModeV)
macro(RoundFloat)
macro(SafePoint)
macro(SafePointScalarObject)
--- a/src/hotspot/share/opto/compile.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/compile.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -76,9 +76,6 @@
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ZGC
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
// -------------------- Compile::mach_constant_base_node -----------------------
@@ -990,6 +987,7 @@
_has_method_handle_invokes(false),
_clinit_barrier_on_entry(false),
_comp_arena(mtCompiler),
+ _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
_env(ci_env),
_directive(directive),
_log(ci_env->log()),
@@ -2412,13 +2410,6 @@
print_method(PHASE_MACRO_EXPANSION, 2);
}
-#ifdef ASSERT
- bs->verify_gc_barriers(this, BarrierSetC2::BeforeLateInsertion);
-#endif
-
- bs->barrier_insertion_phase(C, igvn);
- if (failing()) return;
-
{
TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
if (bs->expand_barriers(this, igvn)) {
--- a/src/hotspot/share/opto/compile.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/compile.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -55,7 +55,6 @@
class IdealGraphPrinter;
class InlineTree;
class Int_Array;
-class LoadBarrierNode;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
@@ -96,7 +95,6 @@
LoopOptsNone,
LoopOptsShenandoahExpand,
LoopOptsShenandoahPostExpand,
- LoopOptsZBarrierInsertion,
LoopOptsSkipSplitIf,
LoopOptsVerify
};
@@ -1186,11 +1184,7 @@
bool in_scratch_emit_size() const { return _in_scratch_emit_size; }
enum ScratchBufferBlob {
-#if defined(PPC64)
MAX_inst_size = 2048,
-#else
- MAX_inst_size = 1024,
-#endif
MAX_locs_size = 128, // number of relocInfo elements
MAX_const_size = 128,
MAX_stubs_size = 128
@@ -1265,14 +1259,30 @@
// Process an OopMap Element while emitting nodes
void Process_OopMap_Node(MachNode *mach, int code_offset);
+ class BufferSizingData {
+ public:
+ int _stub;
+ int _code;
+ int _const;
+ int _reloc;
+
+ BufferSizingData() :
+ _stub(0),
+ _code(0),
+ _const(0),
+ _reloc(0)
+ { };
+ };
+
// Initialize code buffer
- CodeBuffer* init_buffer(uint* blk_starts);
+ void estimate_buffer_size(int& const_req);
+ CodeBuffer* init_buffer(BufferSizingData& buf_sizes);
// Write out basic block data to code buffer
void fill_buffer(CodeBuffer* cb, uint* blk_starts);
// Determine which variable sized branches can be shortened
- void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);
+ void shorten_branches(uint* blk_starts, BufferSizingData& buf_sizes);
// Compute the size of first NumberOfLoopInstrToAlign instructions
// at the head of a loop.
--- a/src/hotspot/share/opto/convertnode.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/convertnode.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -531,4 +531,16 @@
return phase->type( in(1) );
}
-
+//=============================================================================
+//------------------------------Identity---------------------------------------
+// Remove redundant roundings.
+Node* RoundDoubleModeNode::Identity(PhaseGVN* phase) {
+ int op = in(1)->Opcode();
+ // Redundant rounding e.g. floor(ceil(n)) -> ceil(n)
+ if(op == Op_RoundDoubleMode) return in(1);
+ return this;
+}
+const Type* RoundDoubleModeNode::Value(PhaseGVN* phase) const {
+ return Type::DOUBLE;
+}
+//=============================================================================
--- a/src/hotspot/share/opto/convertnode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/convertnode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -212,5 +212,16 @@
virtual const Type* Value(PhaseGVN* phase) const;
};
+//-----------------------------RoundDoubleModeNode-----------------------------
+class RoundDoubleModeNode: public Node {
+ public:
+ RoundDoubleModeNode(Node *in1, Node * rmode): Node(0, in1, rmode) {}
+ virtual int Opcode() const;
+ virtual const Type *bottom_type() const { return Type::DOUBLE; }
+ virtual uint ideal_reg() const { return Op_RegD; }
+ virtual Node* Identity(PhaseGVN* phase);
+ virtual const Type* Value(PhaseGVN* phase) const;
+};
+
#endif // SHARE_OPTO_CONVERTNODE_HPP
--- a/src/hotspot/share/opto/library_call.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/library_call.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -534,6 +534,9 @@
case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
case vmIntrinsics::_getClass: return inline_native_getClass();
+ case vmIntrinsics::_ceil:
+ case vmIntrinsics::_floor:
+ case vmIntrinsics::_rint:
case vmIntrinsics::_dsin:
case vmIntrinsics::_dcos:
case vmIntrinsics::_dtan:
@@ -1818,6 +1821,9 @@
switch (id) {
case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
case vmIntrinsics::_dsqrt: n = new SqrtDNode(C, control(), arg); break;
+ case vmIntrinsics::_ceil: n = new RoundDoubleModeNode(arg, makecon(TypeInt::make(2))); break;
+ case vmIntrinsics::_floor: n = new RoundDoubleModeNode(arg, makecon(TypeInt::make(1))); break;
+ case vmIntrinsics::_rint: n = new RoundDoubleModeNode(arg, makecon(TypeInt::make(0))); break;
default: fatal_unexpected_iid(id); break;
}
set_result(_gvn.transform(n));
@@ -1891,6 +1897,9 @@
runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
// These intrinsics are supported on all hardware
+ case vmIntrinsics::_ceil:
+ case vmIntrinsics::_floor:
+ case vmIntrinsics::_rint: return Matcher::match_rule_supported(Op_RoundDoubleMode) ? inline_double_math(id) : false;
case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_double_math(id) : false;
case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_double_math(id) : false;
case vmIntrinsics::_fabs: return Matcher::match_rule_supported(Op_AbsF) ? inline_math(id) : false;
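The three new intrinsics all lower to RoundDoubleModeNode with a small integer rounding mode: 0 for _rint, 1 for _floor, 2 for _ceil, per the inline_double_math hunk above. The standalone sketch below mimics that mapping with the C math library; the equivalence is an assumption made for illustration, not HotSpot code.

#include <cmath>
#include <cstdio>

// mode 0 = round to nearest even, 1 = toward negative infinity, 2 = toward positive infinity
double round_double_mode(double d, int mode) {
  switch (mode) {
    case 0:  return std::rint(d);    // Math.rint
    case 1:  return std::floor(d);   // Math.floor
    case 2:  return std::ceil(d);    // Math.ceil
    default: return d;
  }
}

int main() {
  std::printf("%.1f %.1f %.1f\n",
              round_double_mode(2.5, 0),     // 2.0 (ties round to even)
              round_double_mode(2.5, 1),     // 2.0
              round_double_mode(2.5, 2));    // 3.0
}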
--- a/src/hotspot/share/opto/loopPredicate.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/loopPredicate.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1381,7 +1381,6 @@
} // end while
}
- Node_List if_proj_list_freq(area);
if (follow_branches) {
PathFrequency pf(loop->_head, this);
@@ -1399,6 +1398,7 @@
// And look into all branches
Node_Stack stack(0);
VectorSet seen(Thread::current()->resource_area());
+ Node_List if_proj_list_freq(area);
while (regions.size() > 0) {
Node* c = regions.pop();
loop_predication_follow_branches(c, loop, loop_trip_cnt, pf, stack, seen, if_proj_list_freq);
--- a/src/hotspot/share/opto/loopTransform.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/loopTransform.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -671,76 +671,50 @@
loop->record_for_igvn();
}
-// The Estimated Loop Unroll Size: UnrollFactor * (106% * BodySize + BC) + CC,
-// where BC and CC are (totally) ad-hoc/magic "body" and "clone" constants,
-// respectively, used to ensure that node usage estimates made are on the safe
-// side, for the most part. This is a simplified version of the loop clone
-// size calculation in est_loop_clone_sz(), defined for unroll factors larger
-// than one (>1), performing an overflow check and returning 'UINT_MAX' in
-// case of an overflow.
-static uint est_loop_unroll_sz(uint factor, uint size) {
- precond(0 < factor);
-
- uint const bc = 5;
- uint const cc = 7;
- uint const sz = size + (size + 15) / 16;
- uint estimate = factor * (sz + bc) + cc;
-
- return (estimate - cc) / factor == sz + bc ? estimate : UINT_MAX;
-}
-
-#define EMPTY_LOOP_SIZE 7 // Number of nodes in an empty loop.
-
//------------------------------policy_maximally_unroll------------------------
// Calculate the exact loop trip-count and return TRUE if loop can be fully,
// i.e. maximally, unrolled, otherwise return FALSE. When TRUE, the estimated
// node budget is also requested.
-bool IdealLoopTree::policy_maximally_unroll(PhaseIdealLoop *phase) const {
- CountedLoopNode *cl = _head->as_CountedLoop();
+bool IdealLoopTree::policy_maximally_unroll(PhaseIdealLoop* phase) const {
+ CountedLoopNode* cl = _head->as_CountedLoop();
assert(cl->is_normal_loop(), "");
if (!cl->is_valid_counted_loop()) {
- return false; // Malformed counted loop
+ return false; // Malformed counted loop.
}
if (!cl->has_exact_trip_count()) {
- // Trip count is not exact.
- return false;
+ return false; // Trip count is not exact.
}
uint trip_count = cl->trip_count();
// Note, max_juint is used to indicate unknown trip count.
assert(trip_count > 1, "one iteration loop should be optimized out already");
- assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");
+ assert(trip_count < max_juint, "exact trip_count should be less than max_juint.");
// If nodes are depleted, some transform has miscalculated its needs.
assert(!phase->exceeding_node_budget(), "sanity");
- // Real policy: if we maximally unroll, does it get too big?
- // Allow the unrolled mess to get larger than standard loop
- // size. After all, it will no longer be a loop.
- uint body_size = _body.size();
+ // Allow the unrolled body to get larger than the standard loop size limit.
uint unroll_limit = (uint)LoopUnrollLimit * 4;
assert((intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
- if (trip_count > unroll_limit || body_size > unroll_limit) {
+ if (trip_count > unroll_limit || _body.size() > unroll_limit) {
return false;
}
- // Take into account that after unroll conjoined heads and tails will fold,
- // otherwise policy_unroll() may allow more unrolling than max unrolling.
- uint new_body_size = est_loop_unroll_sz(trip_count, body_size - EMPTY_LOOP_SIZE);
+ uint new_body_size = est_loop_unroll_sz(trip_count);
if (new_body_size == UINT_MAX) { // Check for bad estimate (overflow).
return false;
}
- // Fully unroll a loop with few iterations regardless next conditions since
- // following loop optimizations will split such loop anyway (pre-main-post).
+ // Fully unroll a loop with few iterations, regardless of other conditions,
+ // since the following (general) loop optimizations will split such a loop
+ // in any case (into pre-main-post).
if (trip_count <= 3) {
return phase->may_require_nodes(new_body_size);
}
- if (new_body_size > unroll_limit ||
- // Unrolling can result in a large amount of node construction
- phase->exceeding_node_budget(new_body_size)) {
+ // Reject if unrolling will result in too much node construction.
+ if (new_body_size > unroll_limit || phase->exceeding_node_budget(new_body_size)) {
return false;
}
--- a/src/hotspot/share/opto/loopnode.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/loopnode.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -993,18 +993,6 @@
}
}
- if (UseZGC && !inner_out->in(0)->is_CountedLoopEnd()) {
- // In some very special cases there can be a load that has no other uses than the
- // counted loop safepoint. Then its loadbarrier will be placed between the inner
- // loop exit and the safepoint. This is very rare
-
- Node* ifnode = inner_out->in(1)->in(0);
- // Region->IfTrue->If == Region->Iffalse->If
- if (ifnode == inner_out->in(2)->in(0)) {
- inner_out = ifnode->in(0);
- }
- }
-
CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
assert(cle == inner->loopexit_or_null(), "mismatch");
bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
@@ -2471,6 +2459,39 @@
assert((estimate - cc) / factor == sz + bc, "overflow");
+ return estimate + est_loop_flow_merge_sz();
+}
+
+// The Estimated Loop (full-) Unroll Size:
+// UnrollFactor * (~106% * BodySize) + CC + FanOutTerm,
+// where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that
+// node usage estimates made are on the safe side, for the most part. This is
+// a "light" version of the loop clone size calculation (above), based on the
+// assumption that most of the loop-construct overhead will be unraveled when
+// (fully) unrolled. Defined for unroll factors larger than or equal to one (>=1),
+// including an overflow check and returning UINT_MAX in case of an overflow.
+uint IdealLoopTree::est_loop_unroll_sz(uint factor) const {
+
+ precond(factor > 0);
+
+ // Take into account that after unroll conjoined heads and tails will fold.
+ uint const b0 = _body.size() - EMPTY_LOOP_SIZE;
+ uint const cc = 7;
+ uint const sz = b0 + (b0 + 15) / 16;
+ uint estimate = factor * sz + cc;
+
+ if ((estimate - cc) / factor != sz) {
+ return UINT_MAX;
+ }
+
+ return estimate + est_loop_flow_merge_sz();
+}
+
+// Estimate the growth effect (in nodes) of merging control and data flow when
+// cloning a loop body, based on the amount of control and data flow reaching
+// outside of the (current) loop body.
+uint IdealLoopTree::est_loop_flow_merge_sz() const {
+
uint ctrl_edge_out_cnt = 0;
uint data_edge_out_cnt = 0;
@@ -2494,24 +2515,21 @@
}
}
}
- // Add data and control count (x2.0) to estimate iff both are > 0. This is
+ // Use data and control count (x2.0) in estimate iff both are > 0. This is
// a rather pessimistic estimate for the most part, in particular for some
// complex loops, but still not enough to capture all loops.
if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
- estimate += 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
+ return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
}
-
- return estimate;
+ return 0;
}
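A self-contained sketch of the new unroll-size arithmetic introduced above, with the constants copied from the hunk; the flow-merge term is passed in instead of being recomputed, and body_size is assumed to be at least EMPTY_LOOP_SIZE.

#include <climits>
#include <cstdio>

const unsigned EMPTY_LOOP_SIZE = 7;          // nodes in an empty loop (see loopnode.hpp hunk)

unsigned est_loop_unroll_sz(unsigned factor, unsigned body_size, unsigned flow_merge_sz) {
  const unsigned cc = 7;                     // ad-hoc "clone" constant
  const unsigned b0 = body_size - EMPTY_LOOP_SIZE;
  const unsigned sz = b0 + (b0 + 15) / 16;   // ~106% of the stripped body size
  unsigned estimate = factor * sz + cc;
  if ((estimate - cc) / factor != sz) {      // multiplication overflowed
    return UINT_MAX;
  }
  return estimate + flow_merge_sz;
}

int main() {
  // e.g. a 57-node body fully unrolled 8 times, with no out-of-loop flow merges
  std::printf("%u\n", est_loop_unroll_sz(8, 57, 0));   // prints 439
}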
#ifndef PRODUCT
//------------------------------dump_head--------------------------------------
// Dump 1 liner for loop header info
void IdealLoopTree::dump_head() const {
- for (uint i = 0; i < _nest; i++) {
- tty->print(" ");
- }
- tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
+ tty->sp(2 * _nest);
+ tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx);
if (_irreducible) tty->print(" IRREDUCIBLE");
Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl);
Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
@@ -4019,28 +4037,32 @@
// dominated by early is considered a potentially interfering store.
// This can produce false positives.
if (n->is_Load() && LCA != early) {
- Node_List worklist;
-
- Node *mem = n->in(MemNode::Memory);
- for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
- Node* s = mem->fast_out(i);
- worklist.push(s);
- }
- while(worklist.size() != 0 && LCA != early) {
- Node* s = worklist.pop();
- if (s->is_Load() || s->Opcode() == Op_SafePoint ||
- (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
- continue;
- } else if (s->is_MergeMem()) {
- for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
- Node* s1 = s->fast_out(i);
- worklist.push(s1);
- }
- } else {
- Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
- assert(sctrl != NULL || s->outcnt() == 0, "must have control");
- if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
- LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
+ int load_alias_idx = C->get_alias_index(n->adr_type());
+ if (C->alias_type(load_alias_idx)->is_rewritable()) {
+
+ Node_List worklist;
+
+ Node *mem = n->in(MemNode::Memory);
+ for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
+ Node* s = mem->fast_out(i);
+ worklist.push(s);
+ }
+ while(worklist.size() != 0 && LCA != early) {
+ Node* s = worklist.pop();
+ if (s->is_Load() || s->Opcode() == Op_SafePoint ||
+ (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
+ continue;
+ } else if (s->is_MergeMem()) {
+ for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
+ Node* s1 = s->fast_out(i);
+ worklist.push(s1);
+ }
+ } else {
+ Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
+ assert(sctrl != NULL || s->outcnt() == 0, "must have control");
+ if (sctrl != NULL && !sctrl->is_top() && C->can_alias(s->adr_type(), load_alias_idx) && is_dominator(early, sctrl)) {
+ LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
+ }
}
}
}
@@ -4501,69 +4523,67 @@
#ifndef PRODUCT
//------------------------------dump-------------------------------------------
-void PhaseIdealLoop::dump( ) const {
+void PhaseIdealLoop::dump() const {
ResourceMark rm;
Arena* arena = Thread::current()->resource_area();
Node_Stack stack(arena, C->live_nodes() >> 2);
Node_List rpo_list;
VectorSet visited(arena);
visited.set(C->top()->_idx);
- rpo( C->root(), stack, visited, rpo_list );
+ rpo(C->root(), stack, visited, rpo_list);
// Dump root loop indexed by last element in PO order
- dump( _ltree_root, rpo_list.size(), rpo_list );
+ dump(_ltree_root, rpo_list.size(), rpo_list);
}
-void PhaseIdealLoop::dump( IdealLoopTree *loop, uint idx, Node_List &rpo_list ) const {
+void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) const {
loop->dump_head();
// Now scan for CFG nodes in the same loop
- for( uint j=idx; j > 0; j-- ) {
- Node *n = rpo_list[j-1];
- if( !_nodes[n->_idx] ) // Skip dead nodes
+ for (uint j = idx; j > 0; j--) {
+ Node* n = rpo_list[j-1];
+ if (!_nodes[n->_idx]) // Skip dead nodes
continue;
- if( get_loop(n) != loop ) { // Wrong loop nest
- if( get_loop(n)->_head == n && // Found nested loop?
- get_loop(n)->_parent == loop )
- dump(get_loop(n),rpo_list.size(),rpo_list); // Print it nested-ly
+
+ if (get_loop(n) != loop) { // Wrong loop nest
+ if (get_loop(n)->_head == n && // Found nested loop?
+ get_loop(n)->_parent == loop)
+ dump(get_loop(n), rpo_list.size(), rpo_list); // Print it nested-ly
continue;
}
// Dump controlling node
- for( uint x = 0; x < loop->_nest; x++ )
- tty->print(" ");
+ tty->sp(2 * loop->_nest);
tty->print("C");
- if( n == C->root() ) {
+ if (n == C->root()) {
n->dump();
} else {
Node* cached_idom = idom_no_update(n);
- Node *computed_idom = n->in(0);
- if( n->is_Region() ) {
+ Node* computed_idom = n->in(0);
+ if (n->is_Region()) {
computed_idom = compute_idom(n);
// computed_idom() will return n->in(0) when idom(n) is an IfNode (or
// any MultiBranch ctrl node), so apply a similar transform to
// the cached idom returned from idom_no_update.
cached_idom = find_non_split_ctrl(cached_idom);
}
- tty->print(" ID:%d",computed_idom->_idx);
+ tty->print(" ID:%d", computed_idom->_idx);
n->dump();
- if( cached_idom != computed_idom ) {
+ if (cached_idom != computed_idom) {
tty->print_cr("*** BROKEN IDOM! Computed as: %d, cached as: %d",
computed_idom->_idx, cached_idom->_idx);
}
}
// Dump nodes it controls
- for( uint k = 0; k < _nodes.Size(); k++ ) {
+ for (uint k = 0; k < _nodes.Size(); k++) {
// (k < C->unique() && get_ctrl(find(k)) == n)
if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
- Node *m = C->root()->find(k);
- if( m && m->outcnt() > 0 ) {
+ Node* m = C->root()->find(k);
+ if (m && m->outcnt() > 0) {
if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p",
_nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
}
- for( uint j = 0; j < loop->_nest; j++ )
- tty->print(" ");
- tty->print(" ");
+ tty->sp(2 * loop->_nest + 1);
m->dump();
}
}
@@ -4574,7 +4594,7 @@
// Collect a R-P-O for the whole CFG.
// Result list is in post-order (scan backwards for RPO)
-void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
+void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const {
stk.push(start, 0);
visited.set(start->_idx);
@@ -4596,7 +4616,7 @@
//=============================================================================
-//------------------------------LoopTreeIterator-----------------------------------
+//------------------------------LoopTreeIterator-------------------------------
// Advance to next loop tree using a preorder, left-to-right traversal.
void LoopTreeIterator::next() {
--- a/src/hotspot/share/opto/loopnode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/loopnode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -623,6 +623,8 @@
// Estimate the number of nodes required when cloning a loop (body).
uint est_loop_clone_sz(uint factor) const;
+ // Estimate the number of nodes required when unrolling a loop (body).
+ uint est_loop_unroll_sz(uint factor) const;
// Compute loop trip count if possible
void compute_trip_count(PhaseIdealLoop* phase);
@@ -654,11 +656,16 @@
void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);
#ifndef PRODUCT
- void dump_head( ) const; // Dump loop head only
+ void dump_head() const; // Dump loop head only
void dump() const; // Dump this loop recursively
void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif
+ private:
+ enum { EMPTY_LOOP_SIZE = 7 }; // Number of nodes in an empty loop.
+
+ // Estimate the number of nodes resulting from control and data flow merge.
+ uint est_loop_flow_merge_sz() const;
};
// -----------------------------PhaseIdealLoop---------------------------------
@@ -675,7 +682,7 @@
PhaseIterGVN &_igvn;
// Head of loop tree
- IdealLoopTree *_ltree_root;
+ IdealLoopTree* _ltree_root;
// Array of pre-order numbers, plus post-visited bit.
// ZERO for not pre-visited. EVEN for pre-visited but not post-visited.
@@ -1017,9 +1024,9 @@
bool _has_irreducible_loops;
// Per-Node transform
- virtual Node *transform( Node *a_node ) { return 0; }
+ virtual Node* transform(Node* n) { return 0; }
- bool is_counted_loop(Node* x, IdealLoopTree*& loop);
+ bool is_counted_loop(Node* n, IdealLoopTree* &loop);
IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
IdealLoopTree* loop, float cl_prob, float le_fcnt,
Node*& entry_control, Node*& iffalse);
@@ -1034,7 +1041,7 @@
return (IdealLoopTree*)_nodes[n->_idx];
}
- IdealLoopTree *ltree_root() const { return _ltree_root; }
+ IdealLoopTree* ltree_root() const { return _ltree_root; }
// Is 'n' a (nested) member of 'loop'?
int is_member( const IdealLoopTree *loop, Node *n ) const {
@@ -1319,7 +1326,7 @@
// same block. Split thru the Region.
void do_split_if( Node *iff );
- // Conversion of fill/copy patterns into intrisic versions
+ // Conversion of fill/copy patterns into intrinsic versions
bool do_intrinsify_fill();
bool intrinsify_fill(IdealLoopTree* lpt);
bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
@@ -1419,18 +1426,18 @@
public:
void set_created_loop_node() { _created_loop_node = true; }
bool created_loop_node() { return _created_loop_node; }
- void register_new_node( Node *n, Node *blk );
+ void register_new_node(Node* n, Node* blk);
#ifdef ASSERT
void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif
#ifndef PRODUCT
- void dump( ) const;
- void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
+ void dump() const;
+ void dump(IdealLoopTree* loop, uint rpo_idx, Node_List &rpo_list) const;
void verify() const; // Major slow :-)
- void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
- IdealLoopTree *get_loop_idx(Node* n) const {
+ void verify_compare(Node* n, const PhaseIdealLoop* loop_verify, VectorSet &visited) const;
+ IdealLoopTree* get_loop_idx(Node* n) const {
// Dead nodes have no loop, so return the top level loop instead
return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
}
@@ -1439,7 +1446,8 @@
static int _loop_invokes; // Count of PhaseIdealLoop invokes
static int _loop_work; // Sum of PhaseIdealLoop x _unique
#endif
- void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
+
+ void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const;
};
--- a/src/hotspot/share/opto/loopopts.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/loopopts.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -41,9 +41,6 @@
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ZGC
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
//=============================================================================
//------------------------------split_thru_phi---------------------------------
@@ -653,7 +650,10 @@
}
}//for
Node* bol = iff->in(1);
- assert(bol->Opcode() == Op_Bool, "");
+ if (bol->Opcode() == Op_Opaque4) {
+ return NULL; // Ignore loop predicate checks (the Opaque4 ensures they will go away)
+ }
+ assert(bol->Opcode() == Op_Bool, "Unexpected node");
int cmp_op = bol->in(1)->Opcode();
// It is expensive to generate flags from a float compare.
// Avoid duplicated float compare.
@@ -1072,26 +1072,21 @@
// uses.
// A better fix for this problem can be found in the BugTraq entry, but
// expediency for Mantis demands this hack.
- // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
- // split_if_with_blocks from splitting a block because we could not move around
- // the FastLockNode.
+#ifdef _LP64
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
Node* n = region->fast_out(i);
if (n->is_Phi()) {
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j);
- if (m->is_FastLock())
- return false;
-#ifdef _LP64
if (m->Opcode() == Op_ConvI2L)
return false;
if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
return false;
}
-#endif
}
}
}
+#endif
return true;
}
--- a/src/hotspot/share/opto/machnode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/machnode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -197,7 +197,7 @@
// ADLC inherit from this class.
class MachNode : public Node {
public:
- MachNode() : Node((uint)0), _num_opnds(0), _opnds(NULL) {
+ MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(NULL) {
init_class_id(Class_Mach);
}
// Required boilerplate
@@ -211,6 +211,9 @@
// no constant base node input.
virtual uint mach_constant_base_node_input() const { return (uint)-1; }
+ uint8_t barrier_data() const { return _barrier; }
+ void set_barrier_data(uint data) { _barrier = data; }
+
// Copy inputs and operands to new node of instruction.
// Called from cisc_version() and short_branch_version().
// !!!! The method's body is defined in ad_<arch>.cpp file.
@@ -255,6 +258,9 @@
// output have choices - but they must use the same choice.
virtual uint two_adr( ) const { return 0; }
+ // The GC might require some barrier metadata for machine code emission.
+ uint8_t _barrier;
+
// Array of complex operand pointers. Each corresponds to zero or
// more leafs. Must be set by MachNode constructor to point to an
// internal array of MachOpers. The MachOper array is sized by
--- a/src/hotspot/share/opto/macro.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/macro.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -357,19 +357,38 @@
if (ac->modifies(offset, offset, &_igvn, true)) {
assert(ac->in(ArrayCopyNode::Dest) == alloc->result_cast(), "arraycopy destination should be allocation's result");
uint shift = exact_log2(type2aelembytes(bt));
- Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
-#ifdef _LP64
- diff = _igvn.transform(new ConvI2LNode(diff));
-#endif
- diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));
+ Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
+ Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
+ const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
+ const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
- Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
- Node* base = ac->in(ArrayCopyNode::Src);
- Node* adr = _igvn.transform(new AddPNode(base, base, off));
- const TypePtr* adr_type = _igvn.type(base)->is_ptr()->add_offset(offset);
- if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
- // Don't emit a new load from src if src == dst but try to get the value from memory instead
- return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
+ Node* adr = NULL;
+ const TypePtr* adr_type = NULL;
+ if (src_pos_t->is_con() && dest_pos_t->is_con()) {
+ intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
+ Node* base = ac->in(ArrayCopyNode::Src);
+ adr = _igvn.transform(new AddPNode(base, base, MakeConX(off)));
+ adr_type = _igvn.type(base)->is_ptr()->add_offset(off);
+ if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
+ // Don't emit a new load from src if src == dst but try to get the value from memory instead
+ return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
+ }
+ } else {
+ Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
+#ifdef _LP64
+ diff = _igvn.transform(new ConvI2LNode(diff));
+#endif
+ diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));
+
+ Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
+ Node* base = ac->in(ArrayCopyNode::Src);
+ adr = _igvn.transform(new AddPNode(base, base, off));
+ adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
+ if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
+ // Non constant offset in the array: we can't statically
+ // determine the value
+ return NULL;
+ }
}
res = LoadNode::make(_igvn, ctl, mem, adr, adr_type, type, bt, MemNode::unordered, LoadNode::UnknownControl);
}
@@ -1633,14 +1652,11 @@
Node* size_in_bytes) {
InitializeNode* init = alloc->initialization();
// Store the klass & mark bits
- Node* mark_node = NULL;
- // For now only enable fast locking for non-array types
- if (UseBiasedLocking && (length == NULL)) {
- mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
- } else {
- mark_node = makecon(TypeRawPtr::make((address)markWord::prototype().value()));
+ Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
+ if (!mark_node->is_Con()) {
+ transform_later(mark_node);
}
- rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
+ rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
int header_size = alloc->minimum_header_size(); // conservatively small
@@ -2577,15 +2593,36 @@
if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
// node is unreachable, so don't try to expand it
C->remove_macro_node(n);
- } else if (n->is_ArrayCopy()){
- int macro_count = C->macro_count();
+ continue;
+ }
+ int macro_count = C->macro_count();
+ switch (n->class_id()) {
+ case Node::Class_Lock:
+ expand_lock_node(n->as_Lock());
+ assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
+ break;
+ case Node::Class_Unlock:
+ expand_unlock_node(n->as_Unlock());
+ assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
+ break;
+ case Node::Class_ArrayCopy:
expand_arraycopy_node(n->as_ArrayCopy());
assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
+ break;
}
if (C->failing()) return true;
macro_idx --;
}
+ // All nodes except Allocate nodes are expanded now. There could be
+ // new optimization opportunities (such as folding newly created
+ // load from a just allocated object). Run IGVN.
+ _igvn.set_delay_transform(false);
+ _igvn.optimize();
+ if (C->failing()) return true;
+
+ _igvn.set_delay_transform(true);
+
// expand "macro" nodes
// nodes are removed from the macro list as they are processed
while (C->macro_count() > 0) {
@@ -2604,12 +2641,6 @@
case Node::Class_AllocateArray:
expand_allocate_array(n->as_AllocateArray());
break;
- case Node::Class_Lock:
- expand_lock_node(n->as_Lock());
- break;
- case Node::Class_Unlock:
- expand_unlock_node(n->as_Unlock());
- break;
default:
assert(false, "unknown node type in macro list");
}
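
The macro.cpp hunks above split macro expansion into two passes. The following stand-alone sketch (hypothetical types and helper names, not the real PhaseMacroExpand interface) shows the intended control flow: locks, unlocks and array copies are expanded first, one IGVN-style cleanup runs so loads from freshly allocated objects can fold, and allocations are expanded last.

    // Illustrative sketch only; MacroKind/MacroNode stand in for C2 node classes.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum class MacroKind { Lock, Unlock, ArrayCopy, Allocate, AllocateArray };
    struct MacroNode { MacroKind kind; };

    static void expand(MacroNode&)  { /* lower the node to explicit IR */ }
    static void run_igvn_cleanup()  { /* fold newly exposed patterns, e.g. loads from new objects */ }

    static void expand_macro_nodes(std::vector<MacroNode>& macros) {
      // Pass 1: everything except allocations.
      for (std::size_t i = 0; i < macros.size(); ) {
        MacroKind k = macros[i].kind;
        if (k == MacroKind::Lock || k == MacroKind::Unlock || k == MacroKind::ArrayCopy) {
          expand(macros[i]);
          macros.erase(macros.begin() + i);   // expansion removes the node from the macro list
        } else {
          ++i;
        }
      }
      // New folding opportunities exist now; run one cleanup round before allocations.
      run_igvn_cleanup();
      // Pass 2: allocations last.
      for (MacroNode& n : macros) {
        assert(n.kind == MacroKind::Allocate || n.kind == MacroKind::AllocateArray);
        expand(n);
      }
      macros.clear();
    }

    int main() {
      std::vector<MacroNode> macros = {
        { MacroKind::Allocate }, { MacroKind::Lock }, { MacroKind::ArrayCopy }
      };
      expand_macro_nodes(macros);
      return 0;
    }
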
--- a/src/hotspot/share/opto/matcher.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/matcher.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1751,6 +1751,13 @@
_shared_nodes.map(leaf->_idx, ex);
}
+ // Have mach nodes inherit GC barrier data
+ if (leaf->is_LoadStore()) {
+ mach->set_barrier_data(leaf->as_LoadStore()->barrier_data());
+ } else if (leaf->is_Mem()) {
+ mach->set_barrier_data(leaf->as_Mem()->barrier_data());
+ }
+
return ex;
}
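
A minimal, stand-alone illustration of the matcher.cpp hunk above (toy classes, not the HotSpot Matcher): when an ideal memory or load-store node is turned into a machine node, the machine node copies the GC barrier bits so late barrier analysis still sees them after matching.

    #include <cstdint>
    #include <cstdio>

    struct IdealMem {                       // stands in for MemNode / LoadStoreNode
      uint8_t barrier_data() const { return _barrier; }
      uint8_t _barrier = 0;
    };

    struct MachNode {                       // stands in for the matched machine node
      void set_barrier_data(uint8_t d) { _barrier = d; }
      uint8_t barrier_data() const { return _barrier; }
      uint8_t _barrier = 0;
    };

    static MachNode match(const IdealMem& leaf) {
      MachNode mach;
      mach.set_barrier_data(leaf.barrier_data());   // inherit barrier data, as in the hunk above
      return mach;
    }

    int main() {
      IdealMem leaf; leaf._barrier = 0x3;
      std::printf("mach barrier bits: 0x%02x\n", match(leaf).barrier_data());
      return 0;
    }
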
--- a/src/hotspot/share/opto/memnode.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/memnode.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -49,9 +49,6 @@
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
-#if INCLUDE_ZGC
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
// Portions of code courtesy of Clifford Click
@@ -1555,6 +1552,22 @@
return phi;
}
+AllocateNode* LoadNode::is_new_object_mark_load(PhaseGVN *phase) const {
+ if (Opcode() == Op_LoadX) {
+ Node* address = in(MemNode::Address);
+ AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase);
+ Node* mem = in(MemNode::Memory);
+ if (alloc != NULL && mem->is_Proj() &&
+ mem->in(0) != NULL &&
+ mem->in(0) == alloc->initialization() &&
+ alloc->initialization()->proj_out_or_null(0) != NULL) {
+ return alloc;
+ }
+ }
+ return NULL;
+}
+
+
//------------------------------Ideal------------------------------------------
// If the load is from Field memory and the pointer is non-null, it might be possible to
// zero out the control input.
@@ -1683,6 +1696,13 @@
}
}
+ AllocateNode* alloc = is_new_object_mark_load(phase);
+ if (alloc != NULL && alloc->Opcode() == Op_Allocate && UseBiasedLocking) {
+ InitializeNode* init = alloc->initialization();
+ Node* control = init->proj_out(0);
+ return alloc->make_ideal_mark(phase, address, control, mem);
+ }
+
return progress ? this : NULL;
}
@@ -1941,6 +1961,12 @@
return Type::get_zero_type(_type->basic_type());
}
}
+
+ Node* alloc = is_new_object_mark_load(phase);
+ if (alloc != NULL && !(alloc->Opcode() == Op_Allocate && UseBiasedLocking)) {
+ return TypeX::make(markWord::prototype().value());
+ }
+
return _type;
}
@@ -2822,7 +2848,7 @@
: Node(required),
_type(rt),
_adr_type(at),
- _has_barrier(false)
+ _barrier(0)
{
init_req(MemNode::Control, c );
init_req(MemNode::Memory , mem);
--- a/src/hotspot/share/opto/memnode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/memnode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -43,6 +43,8 @@
bool _unaligned_access; // Unaligned access from unsafe
bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
bool _unsafe_access; // Access of unsafe origin.
+ uint8_t _barrier; // Bit field with barrier information
+
protected:
#ifdef ASSERT
const TypePtr* _adr_type; // What kind of memory is being addressed?
@@ -62,18 +64,30 @@
unset // The memory ordering is not set (used for testing)
} MemOrd;
protected:
- MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
- : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
+ MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) :
+ Node(c0,c1,c2),
+ _unaligned_access(false),
+ _mismatched_access(false),
+ _unsafe_access(false),
+ _barrier(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
- MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
- : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
+ MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
+ Node(c0,c1,c2,c3),
+ _unaligned_access(false),
+ _mismatched_access(false),
+ _unsafe_access(false),
+ _barrier(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
- MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
- : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
+ MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
+ Node(c0,c1,c2,c3,c4),
+ _unaligned_access(false),
+ _mismatched_access(false),
+ _unsafe_access(false),
+ _barrier(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
@@ -125,6 +139,9 @@
#endif
}
+ uint8_t barrier_data() { return _barrier; }
+ void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; }
+
// Search through memory states which precede this node (load or store).
// Look for an exact match for the address, with no intervening
// aliased stores.
@@ -181,7 +198,7 @@
// this field.
const MemOrd _mo;
- uint _barrier; // Bit field with barrier information
+ AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const;
protected:
virtual bool cmp(const Node &n) const;
@@ -194,7 +211,7 @@
public:
LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
- : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _barrier(0), _type(rt) {
+ : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
init_class_id(Class_Load);
}
inline bool is_unordered() const { return !is_acquire(); }
@@ -263,10 +280,6 @@
Node* convert_to_unsigned_load(PhaseGVN& gvn);
Node* convert_to_signed_load(PhaseGVN& gvn);
- void copy_barrier_info(const Node* src) { _barrier = src->as_Load()->_barrier; }
- uint barrier_data() { return _barrier; }
- void set_barrier_data(uint barrier_data) { _barrier |= barrier_data; }
-
void pin() { _control_dependency = Pinned; }
bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
@@ -818,7 +831,7 @@
private:
const Type* const _type; // What kind of value is loaded?
const TypePtr* _adr_type; // What kind of memory is being addressed?
- bool _has_barrier;
+ uint8_t _barrier; // Bit field with barrier information
virtual uint size_of() const; // Size is bigger
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
@@ -831,8 +844,9 @@
bool result_not_used() const;
MemBarNode* trailing_membar() const;
- void set_has_barrier() { _has_barrier = true; };
- bool has_barrier() const { return _has_barrier; };
+
+ uint8_t barrier_data() { return _barrier; }
+ void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; }
};
class LoadStoreConditionalNode : public LoadStoreNode {
@@ -884,6 +898,7 @@
MemNode::MemOrd order() const {
return _mem_ord;
}
+ virtual uint size_of() const { return sizeof(*this); }
};
class CompareAndExchangeNode : public LoadStoreNode {
@@ -901,6 +916,7 @@
MemNode::MemOrd order() const {
return _mem_ord;
}
+ virtual uint size_of() const { return sizeof(*this); }
};
//------------------------------CompareAndSwapBNode---------------------------
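
The memnode.hpp changes above replace the load-specific uint _barrier and the LoadStoreNode bool _has_barrier with a single uint8_t _barrier bit field carried by MemNode and LoadStoreNode. A self-contained sketch of that pattern follows; the flag names are made up for illustration.

    #include <cstdint>
    #include <cstdio>

    enum BarrierBits : uint8_t {            // hypothetical flag names
      BarrierNone   = 0,
      BarrierStrong = 1u << 0,
      BarrierWeak   = 1u << 1,
      BarrierElided = 1u << 2
    };

    class MemOp {
      uint8_t _barrier = 0;                 // bit field with barrier information
     public:
      uint8_t barrier_data() const { return _barrier; }
      void set_barrier_data(uint8_t d) { _barrier = d; }   // overwrite, not OR, as in the patch
    };

    int main() {
      MemOp op;
      op.set_barrier_data(BarrierStrong | BarrierElided);
      std::printf("barrier bits: 0x%02x\n", op.barrier_data());
      return 0;
    }
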
--- a/src/hotspot/share/opto/node.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/node.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -546,9 +546,6 @@
if (n->is_SafePoint()) {
n->as_SafePoint()->clone_replaced_nodes();
}
- if (n->is_Load()) {
- n->as_Load()->copy_barrier_info(this);
- }
return n; // Return the clone
}
@@ -1473,10 +1470,6 @@
if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) {
return false;
}
- BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- if (!bs->needs_anti_dependence_check(this)) {
- return false;
- }
return in(1)->bottom_type()->has_memory();
}
--- a/src/hotspot/share/opto/node.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/node.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -83,8 +83,6 @@
class JumpNode;
class JumpProjNode;
class LoadNode;
-class LoadBarrierNode;
-class LoadBarrierSlowRegNode;
class LoadStoreNode;
class LoadStoreConditionalNode;
class LockNode;
@@ -642,7 +640,6 @@
DEFINE_CLASS_ID(MemBar, Multi, 3)
DEFINE_CLASS_ID(Initialize, MemBar, 0)
DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
- DEFINE_CLASS_ID(LoadBarrier, Multi, 4)
DEFINE_CLASS_ID(Mach, Node, 1)
DEFINE_CLASS_ID(MachReturn, Mach, 0)
@@ -679,7 +676,6 @@
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
- DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7)
DEFINE_CLASS_ID(Proj, Node, 3)
DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -836,8 +832,6 @@
DEFINE_CLASS_QUERY(Load)
DEFINE_CLASS_QUERY(LoadStore)
DEFINE_CLASS_QUERY(LoadStoreConditional)
- DEFINE_CLASS_QUERY(LoadBarrier)
- DEFINE_CLASS_QUERY(LoadBarrierSlowReg)
DEFINE_CLASS_QUERY(Lock)
DEFINE_CLASS_QUERY(Loop)
DEFINE_CLASS_QUERY(Mach)
--- a/src/hotspot/share/opto/output.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/output.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -31,6 +31,8 @@
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/oopMap.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/ad.hpp"
#include "opto/callnode.hpp"
@@ -114,35 +116,33 @@
}
}
+ // Keeper of sizing aspects
+ BufferSizingData buf_sizes = BufferSizingData();
+
+ // Initialize code buffer
+ estimate_buffer_size(buf_sizes._const);
+ if (failing()) return;
+
+ // Pre-compute the length of blocks and replace
+ // long branches with short if machine supports it.
+ // Must be done before ScheduleAndBundle due to SPARC delay slots
uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
blk_starts[0] = 0;
-
- // Initialize code buffer and process short branches.
- CodeBuffer* cb = init_buffer(blk_starts);
-
- if (cb == NULL || failing()) {
- return;
- }
+ shorten_branches(blk_starts, buf_sizes);
ScheduleAndBundle();
-
-#ifndef PRODUCT
- if (trace_opto_output()) {
- tty->print("\n---- After ScheduleAndBundle ----\n");
- for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
- tty->print("\nBB#%03d:\n", i);
- Block* block = _cfg->get_block(i);
- for (uint j = 0; j < block->number_of_nodes(); j++) {
- Node* n = block->get_node(j);
- OptoReg::Name reg = _regalloc->get_reg_first(n);
- tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
- n->dump();
- }
- }
+ if (failing()) {
+ return;
}
-#endif
-
- if (failing()) {
+
+ // Late barrier analysis must be done after schedule and bundle
+ // Otherwise liveness based spilling will fail
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ bs->late_barrier_analysis();
+
+ // Complete sizing of codebuffer
+ CodeBuffer* cb = init_buffer(buf_sizes);
+ if (cb == NULL || failing()) {
return;
}
@@ -223,7 +223,7 @@
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
-void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
+void Compile::shorten_branches(uint* blk_starts, BufferSizingData& buf_sizes) {
// Compute size of each block, method size, and relocation information size
uint nblocks = _cfg->number_of_blocks();
@@ -241,11 +241,11 @@
bool has_short_branch_candidate = false;
// Initialize the sizes to 0
- code_size = 0; // Size in bytes of generated code
- stub_size = 0; // Size in bytes of all stub entries
+ int code_size = 0; // Size in bytes of generated code
+ int stub_size = 0; // Size in bytes of all stub entries
// Size in bytes of all relocation entries, including those in local stubs.
// Start with 2-bytes of reloc info for the unvalidated entry point
- reloc_size = 1; // Number of relocation entries
+ int reloc_size = 1; // Number of relocation entries
// Make three passes. The first computes pessimistic blk_starts,
// relative jmp_offset and reloc_size information. The second performs
@@ -479,6 +479,10 @@
// a relocation index.
// The CodeBuffer will expand the locs array if this estimate is too low.
reloc_size *= 10 / sizeof(relocInfo);
+
+ buf_sizes._reloc = reloc_size;
+ buf_sizes._code = code_size;
+ buf_sizes._stub = stub_size;
}
//------------------------------FillLocArray-----------------------------------
@@ -490,8 +494,8 @@
// This should never have accepted Bad before
assert(OptoReg::is_valid(regnum), "location must be valid");
return (OptoReg::is_reg(regnum))
- ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
- : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum)));
+ ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
+ : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum)));
}
@@ -610,12 +614,12 @@
}
#endif //_LP64
else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
- OptoReg::is_reg(regnum) ) {
+ OptoReg::is_reg(regnum) ) {
array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double()
- ? Location::float_in_dbl : Location::normal ));
+ ? Location::float_in_dbl : Location::normal ));
} else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
- ? Location::int_in_long : Location::normal ));
+ ? Location::int_in_long : Location::normal ));
} else if( t->base() == Type::NarrowOop ) {
array->append(new_loc_value( _regalloc, regnum, Location::narrowoop ));
} else {
@@ -626,48 +630,48 @@
// No register. It must be constant data.
switch (t->base()) {
- case Type::Half: // Second half of a double
- ShouldNotReachHere(); // Caller should skip 2nd halves
- break;
- case Type::AnyPtr:
- array->append(new ConstantOopWriteValue(NULL));
- break;
- case Type::AryPtr:
- case Type::InstPtr: // fall through
- array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
- break;
- case Type::NarrowOop:
- if (t == TypeNarrowOop::NULL_PTR) {
+ case Type::Half: // Second half of a double
+ ShouldNotReachHere(); // Caller should skip 2nd halves
+ break;
+ case Type::AnyPtr:
array->append(new ConstantOopWriteValue(NULL));
- } else {
- array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
+ break;
+ case Type::AryPtr:
+ case Type::InstPtr: // fall through
+ array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
+ break;
+ case Type::NarrowOop:
+ if (t == TypeNarrowOop::NULL_PTR) {
+ array->append(new ConstantOopWriteValue(NULL));
+ } else {
+ array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
+ }
+ break;
+ case Type::Int:
+ array->append(new ConstantIntValue(t->is_int()->get_con()));
+ break;
+ case Type::RawPtr:
+ // A return address (T_ADDRESS).
+ assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
+#ifdef _LP64
+ // Must be restored to the full-width 64-bit stack slot.
+ array->append(new ConstantLongValue(t->is_ptr()->get_con()));
+#else
+ array->append(new ConstantIntValue(t->is_ptr()->get_con()));
+#endif
+ break;
+ case Type::FloatCon: {
+ float f = t->is_float_constant()->getf();
+ array->append(new ConstantIntValue(jint_cast(f)));
+ break;
}
- break;
- case Type::Int:
- array->append(new ConstantIntValue(t->is_int()->get_con()));
- break;
- case Type::RawPtr:
- // A return address (T_ADDRESS).
- assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
+ case Type::DoubleCon: {
+ jdouble d = t->is_double_constant()->getd();
#ifdef _LP64
- // Must be restored to the full-width 64-bit stack slot.
- array->append(new ConstantLongValue(t->is_ptr()->get_con()));
+ array->append(new ConstantIntValue((jint)0));
+ array->append(new ConstantDoubleValue(d));
#else
- array->append(new ConstantIntValue(t->is_ptr()->get_con()));
-#endif
- break;
- case Type::FloatCon: {
- float f = t->is_float_constant()->getf();
- array->append(new ConstantIntValue(jint_cast(f)));
- break;
- }
- case Type::DoubleCon: {
- jdouble d = t->is_double_constant()->getd();
-#ifdef _LP64
- array->append(new ConstantIntValue((jint)0));
- array->append(new ConstantDoubleValue(d));
-#else
- // Repack the double as two jints.
+ // Repack the double as two jints.
// The convention the interpreter uses is that the second local
// holds the first raw word of the native double representation.
// This is actually reasonable, since locals and stack arrays
@@ -679,15 +683,15 @@
array->append(new ConstantIntValue(acc.words[1]));
array->append(new ConstantIntValue(acc.words[0]));
#endif
- break;
- }
- case Type::Long: {
- jlong d = t->is_long()->get_con();
+ break;
+ }
+ case Type::Long: {
+ jlong d = t->is_long()->get_con();
#ifdef _LP64
- array->append(new ConstantIntValue((jint)0));
- array->append(new ConstantLongValue(d));
+ array->append(new ConstantIntValue((jint)0));
+ array->append(new ConstantLongValue(d));
#else
- // Repack the long as two jints.
+ // Repack the long as two jints.
// The convention the interpreter uses is that the second local
// holds the first raw word of the native double representation.
// This is actually reasonable, since locals and stack arrays
@@ -699,14 +703,14 @@
array->append(new ConstantIntValue(acc.words[1]));
array->append(new ConstantIntValue(acc.words[0]));
#endif
- break;
- }
- case Type::Top: // Add an illegal value here
- array->append(new LocationValue(Location()));
- break;
- default:
- ShouldNotReachHere();
- break;
+ break;
+ }
+ case Type::Top: // Add an illegal value here
+ array->append(new LocationValue(Location()));
+ break;
+ default:
+ ShouldNotReachHere();
+ break;
}
}
@@ -871,58 +875,58 @@
// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
- Compile* C;
- JVMState* _pending_jvms;
- int _pending_offset;
-
- void emit_non_safepoint();
+ Compile* C;
+ JVMState* _pending_jvms;
+ int _pending_offset;
+
+ void emit_non_safepoint();
public:
- NonSafepointEmitter(Compile* compile) {
- this->C = compile;
- _pending_jvms = NULL;
- _pending_offset = 0;
- }
-
- void observe_instruction(Node* n, int pc_offset) {
- if (!C->debug_info()->recording_non_safepoints()) return;
-
- Node_Notes* nn = C->node_notes_at(n->_idx);
- if (nn == NULL || nn->jvms() == NULL) return;
- if (_pending_jvms != NULL &&
- _pending_jvms->same_calls_as(nn->jvms())) {
- // Repeated JVMS? Stretch it up here.
- _pending_offset = pc_offset;
- } else {
+ NonSafepointEmitter(Compile* compile) {
+ this->C = compile;
+ _pending_jvms = NULL;
+ _pending_offset = 0;
+ }
+
+ void observe_instruction(Node* n, int pc_offset) {
+ if (!C->debug_info()->recording_non_safepoints()) return;
+
+ Node_Notes* nn = C->node_notes_at(n->_idx);
+ if (nn == NULL || nn->jvms() == NULL) return;
if (_pending_jvms != NULL &&
+ _pending_jvms->same_calls_as(nn->jvms())) {
+ // Repeated JVMS? Stretch it up here.
+ _pending_offset = pc_offset;
+ } else {
+ if (_pending_jvms != NULL &&
+ _pending_offset < pc_offset) {
+ emit_non_safepoint();
+ }
+ _pending_jvms = NULL;
+ if (pc_offset > C->debug_info()->last_pc_offset()) {
+ // This is the only way _pending_jvms can become non-NULL:
+ _pending_jvms = nn->jvms();
+ _pending_offset = pc_offset;
+ }
+ }
+ }
+
+ // Stay out of the way of real safepoints:
+ void observe_safepoint(JVMState* jvms, int pc_offset) {
+ if (_pending_jvms != NULL &&
+ !_pending_jvms->same_calls_as(jvms) &&
_pending_offset < pc_offset) {
emit_non_safepoint();
}
_pending_jvms = NULL;
- if (pc_offset > C->debug_info()->last_pc_offset()) {
- // This is the only way _pending_jvms can become non-NULL:
- _pending_jvms = nn->jvms();
- _pending_offset = pc_offset;
- }
}
- }
-
- // Stay out of the way of real safepoints:
- void observe_safepoint(JVMState* jvms, int pc_offset) {
- if (_pending_jvms != NULL &&
- !_pending_jvms->same_calls_as(jvms) &&
- _pending_offset < pc_offset) {
- emit_non_safepoint();
+
+ void flush_at_end() {
+ if (_pending_jvms != NULL) {
+ emit_non_safepoint();
+ }
+ _pending_jvms = NULL;
}
- _pending_jvms = NULL;
- }
-
- void flush_at_end() {
- if (_pending_jvms != NULL) {
- emit_non_safepoint();
- }
- _pending_jvms = NULL;
- }
};
void NonSafepointEmitter::emit_non_safepoint() {
@@ -952,15 +956,11 @@
}
//------------------------------init_buffer------------------------------------
-CodeBuffer* Compile::init_buffer(uint* blk_starts) {
+void Compile::estimate_buffer_size(int& const_req) {
// Set the initially allocated size
- int code_req = initial_code_capacity;
- int locs_req = initial_locs_capacity;
- int stub_req = initial_stub_capacity;
- int const_req = initial_const_capacity;
-
- int pad_req = NativeCall::instruction_size;
+ const_req = initial_const_capacity;
+
// The extra spacing after the code is necessary on some platforms.
// Sometimes we need to patch in a jump after the last instruction,
// if the nmethod has been deoptimized. (See 4932387, 4894843.)
@@ -972,7 +972,7 @@
// Compute prolog code size
_method_size = 0;
- _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
+ _frame_slots = OptoReg::reg2stack(_matcher->_old_SP) + _regalloc->_framesize;
#if defined(IA64) && !defined(AIX)
if (save_argument_registers()) {
// 4815101: this is a stub with implicit and unknown precision fp args.
@@ -1021,11 +1021,18 @@
// Initialize the space for the BufferBlob used to find and verify
// instruction size in MachNode::emit_size()
init_scratch_buffer_blob(const_req);
- if (failing()) return NULL; // Out of memory
-
- // Pre-compute the length of blocks and replace
- // long branches with short if machine supports it.
- shorten_branches(blk_starts, code_req, locs_req, stub_req);
+}
+
+CodeBuffer* Compile::init_buffer(BufferSizingData& buf_sizes) {
+
+ int stub_req = buf_sizes._stub;
+ int code_req = buf_sizes._code;
+ int const_req = buf_sizes._const;
+
+ int pad_req = NativeCall::instruction_size;
+
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ stub_req += bs->estimate_stub_size();
// nmethod and CodeBuffer count stubs & constants as part of method's code.
// class HandlerImpl is platform-specific and defined in the *.ad files.
@@ -1038,18 +1045,18 @@
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
int total_req =
- const_req +
- code_req +
- pad_req +
- stub_req +
- exception_handler_req +
- deopt_handler_req; // deopt handler
+ const_req +
+ code_req +
+ pad_req +
+ stub_req +
+ exception_handler_req +
+ deopt_handler_req; // deopt handler
if (has_method_handle_invokes())
total_req += deopt_handler_req; // deopt MH handler
CodeBuffer* cb = code_buffer();
- cb->initialize(total_req, locs_req);
+ cb->initialize(total_req, buf_sizes._reloc);
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
@@ -1268,12 +1275,12 @@
Process_OopMap_Node(mach, current_offset);
} // End if safepoint
- // If this is a null check, then add the start of the previous instruction to the list
+ // If this is a null check, then add the start of the previous instruction to the list
else if( mach->is_MachNullCheck() ) {
inct_starts[inct_cnt++] = previous_offset;
}
- // If this is a branch, then fill in the label with the target BB's label
+ // If this is a branch, then fill in the label with the target BB's label
else if (mach->is_MachBranch()) {
// This requires the TRUE branch target be in succs[0]
uint block_num = block->non_connector_successor(0)->_pre_order;
@@ -1284,8 +1291,8 @@
bool delay_slot_is_used = valid_bundle_info(n) &&
node_bundling(n)->use_unconditional_delay();
if (!delay_slot_is_used && mach->may_be_short_branch()) {
- assert(delay_slot == NULL, "not expecting delay slot node");
- int br_size = n->size(_regalloc);
+ assert(delay_slot == NULL, "not expecting delay slot node");
+ int br_size = n->size(_regalloc);
int offset = blk_starts[block_num] - current_offset;
if (block_num >= i) {
// Current and following block's offset are not
@@ -1343,7 +1350,7 @@
}
}
#ifdef ASSERT
- // Check that oop-store precedes the card-mark
+ // Check that oop-store precedes the card-mark
else if (mach->ideal_Opcode() == Op_StoreCM) {
uint storeCM_idx = j;
int count = 0;
@@ -1514,6 +1521,10 @@
}
#endif
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ bs->emit_stubs(*cb);
+ if (failing()) return;
+
#ifndef PRODUCT
// Information on the size of the method, without the extraneous code
Scheduling::increment_method_size(cb->insts_size());
@@ -1688,20 +1699,20 @@
// Initializer for class Scheduling
Scheduling::Scheduling(Arena *arena, Compile &compile)
- : _arena(arena),
- _cfg(compile.cfg()),
- _regalloc(compile.regalloc()),
- _scheduled(arena),
- _available(arena),
- _reg_node(arena),
- _pinch_free_list(arena),
- _next_node(NULL),
- _bundle_instr_count(0),
- _bundle_cycle_number(0),
- _bundle_use(0, 0, resource_count, &_bundle_use_elements[0])
+ : _arena(arena),
+ _cfg(compile.cfg()),
+ _regalloc(compile.regalloc()),
+ _scheduled(arena),
+ _available(arena),
+ _reg_node(arena),
+ _pinch_free_list(arena),
+ _next_node(NULL),
+ _bundle_instr_count(0),
+ _bundle_cycle_number(0),
+ _bundle_use(0, 0, resource_count, &_bundle_use_elements[0])
#ifndef PRODUCT
- , _branches(0)
- , _unconditional_delays(0)
+ , _branches(0)
+ , _unconditional_delays(0)
#endif
{
// Create a MachNopNode
@@ -1782,8 +1793,8 @@
_bundle_use.reset();
memcpy(_bundle_use_elements,
- Pipeline_Use::elaborated_elements,
- sizeof(Pipeline_Use::elaborated_elements));
+ Pipeline_Use::elaborated_elements,
+ sizeof(Pipeline_Use::elaborated_elements));
}
// Perform instruction scheduling and bundling over the sequence of
@@ -1810,6 +1821,22 @@
// Walk backwards over each basic block, computing the needed alignment
// Walk over all the basic blocks
scheduling.DoScheduling();
+
+#ifndef PRODUCT
+ if (trace_opto_output()) {
+ tty->print("\n---- After ScheduleAndBundle ----\n");
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
+ tty->print("\nBB#%03d:\n", i);
+ Block* block = _cfg->get_block(i);
+ for (uint j = 0; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
+ OptoReg::Name reg = _regalloc->get_reg_first(n);
+ tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
+ n->dump();
+ }
+ }
+ }
+#endif
}
// Compute the latency of all the instructions. This is fairly simple,
@@ -1878,7 +1905,7 @@
#ifndef PRODUCT
if (_cfg->C->trace_opto_output())
tty->print("# NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
- n->_idx, _current_latency[n_idx], _bundle_cycle_number);
+ n->_idx, _current_latency[n_idx], _bundle_cycle_number);
#endif
return (false);
}
@@ -1895,7 +1922,7 @@
#ifndef PRODUCT
if (_cfg->C->trace_opto_output())
tty->print("# NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
- n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
+ n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
#endif
return (false);
}
@@ -2103,12 +2130,12 @@
// Don't allow safepoints in the branch shadow, that will
// cause a number of difficulties
if ( avail_pipeline->instructionCount() == 1 &&
- !avail_pipeline->hasMultipleBundles() &&
- !avail_pipeline->hasBranchDelay() &&
- Pipeline::instr_has_unit_size() &&
- d->size(_regalloc) == Pipeline::instr_unit_size() &&
- NodeFitsInBundle(d) &&
- !node_bundling(d)->used_in_delay()) {
+ !avail_pipeline->hasMultipleBundles() &&
+ !avail_pipeline->hasBranchDelay() &&
+ Pipeline::instr_has_unit_size() &&
+ d->size(_regalloc) == Pipeline::instr_unit_size() &&
+ NodeFitsInBundle(d) &&
+ !node_bundling(d)->used_in_delay()) {
if (d->is_Mach() && !d->is_MachSafePoint()) {
// A node that fits in the delay slot was found, so we need to
@@ -2153,13 +2180,13 @@
// step of the bundles
if (!NodeFitsInBundle(n)) {
#ifndef PRODUCT
- if (_cfg->C->trace_opto_output())
- tty->print("# *** STEP(branch won't fit) ***\n");
+ if (_cfg->C->trace_opto_output())
+ tty->print("# *** STEP(branch won't fit) ***\n");
#endif
- // Update the state information
- _bundle_instr_count = 0;
- _bundle_cycle_number += 1;
- _bundle_use.step(1);
+ // Update the state information
+ _bundle_instr_count = 0;
+ _bundle_cycle_number += 1;
+ _bundle_use.step(1);
}
}
@@ -2205,8 +2232,8 @@
#ifndef PRODUCT
if (_cfg->C->trace_opto_output())
tty->print("# *** STEP(%d >= %d instructions) ***\n",
- instruction_count + _bundle_instr_count,
- Pipeline::_max_instrs_per_cycle);
+ instruction_count + _bundle_instr_count,
+ Pipeline::_max_instrs_per_cycle);
#endif
step(1);
}
@@ -2412,7 +2439,7 @@
}
assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
if( last->is_Catch() ||
- (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
+ (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
// There might be a prior call. Skip it.
while (_bb_start < _bb_end && bb->get_node(--_bb_end)->is_MachProj());
} else if( last->is_MachNullCheck() ) {
@@ -2482,7 +2509,7 @@
}
#endif
#ifdef ASSERT
- verify_good_schedule(bb,"after block local scheduling");
+ verify_good_schedule(bb,"after block local scheduling");
#endif
}
@@ -2830,31 +2857,31 @@
//
void Scheduling::garbage_collect_pinch_nodes() {
#ifndef PRODUCT
- if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
+ if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
#endif
- int trace_cnt = 0;
- for (uint k = 0; k < _reg_node.Size(); k++) {
- Node* pinch = _reg_node[k];
- if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
- // no predecence input edges
- (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
- cleanup_pinch(pinch);
- _pinch_free_list.push(pinch);
- _reg_node.map(k, NULL);
+ int trace_cnt = 0;
+ for (uint k = 0; k < _reg_node.Size(); k++) {
+ Node* pinch = _reg_node[k];
+ if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
+ // no predecence input edges
+ (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
+ cleanup_pinch(pinch);
+ _pinch_free_list.push(pinch);
+ _reg_node.map(k, NULL);
#ifndef PRODUCT
- if (_cfg->C->trace_opto_output()) {
- trace_cnt++;
- if (trace_cnt > 40) {
- tty->print("\n");
- trace_cnt = 0;
- }
- tty->print(" %d", pinch->_idx);
+ if (_cfg->C->trace_opto_output()) {
+ trace_cnt++;
+ if (trace_cnt > 40) {
+ tty->print("\n");
+ trace_cnt = 0;
}
+ tty->print(" %d", pinch->_idx);
+ }
#endif
- }
}
+ }
#ifndef PRODUCT
- if (_cfg->C->trace_opto_output()) tty->print("\n");
+ if (_cfg->C->trace_opto_output()) tty->print("\n");
#endif
}
@@ -2891,19 +2918,19 @@
void Scheduling::print_statistics() {
// Print the size added by nops for bundling
tty->print("Nops added %d bytes to total of %d bytes",
- _total_nop_size, _total_method_size);
+ _total_nop_size, _total_method_size);
if (_total_method_size > 0)
tty->print(", for %.2f%%",
- ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
+ ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
tty->print("\n");
// Print the number of branch shadows filled
if (Pipeline::_branch_has_delay_slot) {
tty->print("Of %d branches, %d had unconditional delay slots filled",
- _total_branches, _total_unconditional_delays);
+ _total_branches, _total_unconditional_delays);
if (_total_branches > 0)
tty->print(", for %.2f%%",
- ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
+ ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
tty->print("\n");
}
@@ -2917,6 +2944,6 @@
if (total_bundles > 0)
tty->print("Average ILP (excluding nops) is %.2f\n",
- ((double)total_instructions) / ((double)total_bundles));
+ ((double)total_instructions) / ((double)total_bundles));
}
#endif
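
The output.cpp changes above restructure code-buffer setup: sizes are gathered into a BufferSizingData record, shorten_branches() fills in the code/stub/reloc estimates before scheduling, the GC barrier set contributes its stub size after late barrier analysis, and only then is the buffer allocated. A compact, hypothetical sketch of that flow (stand-in values and helper names):

    #include <cstdio>

    struct BufferSizingData {               // same fields as used in the hunks above
      int _code  = 0;
      int _reloc = 0;
      int _stub  = 0;
      int _const = 0;
    };

    static void estimate_buffer_size(int& const_req) { const_req = 4 * 1024; }
    static void shorten_branches(BufferSizingData& s) { s._code = 512; s._stub = 64; s._reloc = 32; }
    static int  gc_stub_size()                        { return 128; }  // stands in for bs->estimate_stub_size()
    static void init_buffer(const BufferSizingData& s) {
      int total = s._const + s._code + s._stub;
      std::printf("code buffer: %d bytes, %d reloc entries\n", total, s._reloc);
    }

    int main() {
      BufferSizingData buf_sizes;
      estimate_buffer_size(buf_sizes._const);
      shorten_branches(buf_sizes);          // before scheduling, as the patch requires
      // schedule_and_bundle(); late_barrier_analysis();   // happen here in the real flow
      buf_sizes._stub += gc_stub_size();
      init_buffer(buf_sizes);
      return 0;
    }
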
--- a/src/hotspot/share/opto/output.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/output.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -40,7 +40,6 @@
class PhaseChaitin;
class Pipeline_Use_Element;
class Pipeline_Use;
-
#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
@@ -49,10 +48,7 @@
// Define the initial sizes for allocation of the resizable code buffer
enum {
- initial_code_capacity = 16 * 1024,
- initial_stub_capacity = 4 * 1024,
- initial_const_capacity = 4 * 1024,
- initial_locs_capacity = 3 * 1024
+ initial_const_capacity = 4 * 1024
};
//------------------------------Scheduling----------------------------------
--- a/src/hotspot/share/opto/phaseX.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/phaseX.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1648,14 +1648,14 @@
// of the mirror load depends on the type of 'n'. See LoadNode::Value().
// LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- bool has_load_barriers = bs->has_load_barriers();
+ bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
const Type* ut = u->bottom_type();
if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
- if (has_load_barriers) {
+ if (has_load_barrier_nodes) {
// Search for load barriers behind the load
for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
Node* b = u->fast_out(i3);
@@ -1808,14 +1808,14 @@
// Loading the java mirror from a Klass requires two loads and the type
// of the mirror load depends on the type of 'n'. See LoadNode::Value().
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- bool has_load_barriers = bs->has_load_barriers();
+ bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = m->fast_out(i2);
const Type* ut = u->bottom_type();
if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
- if (has_load_barriers) {
+ if (has_load_barrier_nodes) {
// Search for load barriers behind the load
for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
Node* b = u->fast_out(i3);
--- a/src/hotspot/share/opto/split_if.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/split_if.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -110,83 +110,90 @@
n->dump();
}
#endif
- // Clone down any block-local BoolNode uses of this CmpNode
- for (DUIterator i = n->outs(); n->has_out(i); i++) {
- Node* bol = n->out(i);
- assert( bol->is_Bool(), "" );
- if (bol->outcnt() == 1) {
- Node* use = bol->unique_out();
- if (use->Opcode() == Op_Opaque4) {
- if (use->outcnt() == 1) {
- Node* iff = use->unique_out();
- assert(iff->is_If(), "unexpected node type");
- Node *use_c = iff->in(0);
+ if (!n->is_FastLock()) {
+ // Clone down any block-local BoolNode uses of this CmpNode
+ for (DUIterator i = n->outs(); n->has_out(i); i++) {
+ Node* bol = n->out(i);
+ assert( bol->is_Bool(), "" );
+ if (bol->outcnt() == 1) {
+ Node* use = bol->unique_out();
+ if (use->Opcode() == Op_Opaque4) {
+ if (use->outcnt() == 1) {
+ Node* iff = use->unique_out();
+ assert(iff->is_If(), "unexpected node type");
+ Node *use_c = iff->in(0);
+ if (use_c == blk1 || use_c == blk2) {
+ continue;
+ }
+ }
+ } else {
+ // We might see an Opaque1 from a loop limit check here
+ assert(use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque1, "unexpected node type");
+ Node *use_c = use->is_If() ? use->in(0) : get_ctrl(use);
if (use_c == blk1 || use_c == blk2) {
+ assert(use->is_CMove(), "unexpected node type");
continue;
}
}
- } else {
- // We might see an Opaque1 from a loop limit check here
- assert(use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque1, "unexpected node type");
- Node *use_c = use->is_If() ? use->in(0) : get_ctrl(use);
- if (use_c == blk1 || use_c == blk2) {
- assert(use->is_CMove(), "unexpected node type");
- continue;
+ }
+ if (get_ctrl(bol) == blk1 || get_ctrl(bol) == blk2) {
+ // Recursively sink any BoolNode
+#ifndef PRODUCT
+ if( PrintOpto && VerifyLoopOptimizations ) {
+ tty->print("Cloning down: ");
+ bol->dump();
}
- }
- }
- if (get_ctrl(bol) == blk1 || get_ctrl(bol) == blk2) {
- // Recursively sink any BoolNode
-#ifndef PRODUCT
- if( PrintOpto && VerifyLoopOptimizations ) {
- tty->print("Cloning down: ");
- bol->dump();
- }
#endif
- for (DUIterator j = bol->outs(); bol->has_out(j); j++) {
- Node* u = bol->out(j);
- // Uses are either IfNodes, CMoves or Opaque4
- if (u->Opcode() == Op_Opaque4) {
- assert(u->in(1) == bol, "bad input");
- for (DUIterator_Last kmin, k = u->last_outs(kmin); k >= kmin; --k) {
- Node* iff = u->last_out(k);
- assert(iff->is_If() || iff->is_CMove(), "unexpected node type");
- assert( iff->in(1) == u, "" );
+ for (DUIterator j = bol->outs(); bol->has_out(j); j++) {
+ Node* u = bol->out(j);
+ // Uses are either IfNodes, CMoves or Opaque4
+ if (u->Opcode() == Op_Opaque4) {
+ assert(u->in(1) == bol, "bad input");
+ for (DUIterator_Last kmin, k = u->last_outs(kmin); k >= kmin; --k) {
+ Node* iff = u->last_out(k);
+ assert(iff->is_If() || iff->is_CMove(), "unexpected node type");
+ assert( iff->in(1) == u, "" );
+ // Get control block of either the CMove or the If input
+ Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
+ Node *x1 = bol->clone();
+ Node *x2 = u->clone();
+ register_new_node(x1, iff_ctrl);
+ register_new_node(x2, iff_ctrl);
+ _igvn.replace_input_of(x2, 1, x1);
+ _igvn.replace_input_of(iff, 1, x2);
+ }
+ _igvn.remove_dead_node(u);
+ --j;
+ } else {
+ // We might see an Opaque1 from a loop limit check here
+ assert(u->is_If() || u->is_CMove() || u->Opcode() == Op_Opaque1, "unexpected node type");
+ assert(u->in(1) == bol, "");
// Get control block of either the CMove or the If input
- Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
- Node *x1 = bol->clone();
- Node *x2 = u->clone();
- register_new_node(x1, iff_ctrl);
- register_new_node(x2, iff_ctrl);
- _igvn.replace_input_of(x2, 1, x1);
- _igvn.replace_input_of(iff, 1, x2);
+ Node *u_ctrl = u->is_If() ? u->in(0) : get_ctrl(u);
+ assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
+ Node *x = bol->clone();
+ register_new_node(x, u_ctrl);
+ _igvn.replace_input_of(u, 1, x);
+ --j;
}
- _igvn.remove_dead_node(u);
- --j;
- } else {
- // We might see an Opaque1 from a loop limit check here
- assert(u->is_If() || u->is_CMove() || u->Opcode() == Op_Opaque1, "unexpected node type");
- assert(u->in(1) == bol, "");
- // Get control block of either the CMove or the If input
- Node *u_ctrl = u->is_If() ? u->in(0) : get_ctrl(u);
- assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
- Node *x = bol->clone();
- register_new_node(x, u_ctrl);
- _igvn.replace_input_of(u, 1, x);
- --j;
}
+ _igvn.remove_dead_node(bol);
+ --i;
}
- _igvn.remove_dead_node(bol);
- --i;
}
}
// Clone down this CmpNode
for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
- Node* bol = n->last_out(j);
- assert( bol->in(1) == n, "" );
+ Node* use = n->last_out(j);
+ uint pos = 1;
+ if (n->is_FastLock()) {
+ pos = TypeFunc::Parms + 2;
+ assert(use->is_Lock(), "FastLock only used by LockNode");
+ }
+ assert(use->in(pos) == n, "" );
Node *x = n->clone();
- register_new_node(x, get_ctrl(bol));
- _igvn.replace_input_of(bol, 1, x);
+ register_new_node(x, ctrl_or_self(use));
+ _igvn.replace_input_of(use, pos, x);
}
_igvn.remove_dead_node( n );
--- a/src/hotspot/share/opto/superword.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/superword.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -2045,12 +2045,11 @@
for (uint k = 0; k < use->req(); k++) {
Node* n = use->in(k);
if (def == n) {
- // reductions should only have a Phi use at the the loop
- // head and out of loop uses
+ // Reductions should only have a Phi use at the loop head or a non-phi use
+ // outside of the loop if it is the last element of the pack (e.g. SafePoint).
if (def->is_reduction() &&
((use->is_Phi() && use->in(0) == _lpt->_head) ||
- !_lpt->is_member(_phase->get_loop(_phase->ctrl_or_self(use))))) {
- assert(i == p->size()-1, "must be last element of the pack");
+ (!_lpt->is_member(_phase->get_loop(_phase->ctrl_or_self(use))) && i == p->size()-1))) {
continue;
}
if (!is_vector_use(use, k)) {
@@ -2402,6 +2401,12 @@
const TypePtr* atyp = n->adr_type();
vn = StoreVectorNode::make(opc, ctl, mem, adr, atyp, val, vlen);
vlen_in_bytes = vn->as_StoreVector()->memory_size();
+ } else if (VectorNode::is_roundopD(n)) {
+ Node* in1 = vector_opd(p, 1);
+ Node* in2 = low_adr->in(2);
+ assert(in2->is_Con(), "Constant rounding mode expected.");
+ vn = VectorNode::make(opc, in1, in2, vlen, velt_basic_type(n));
+ vlen_in_bytes = vn->as_Vector()->length_in_bytes();
} else if (VectorNode::is_muladds2i(n)) {
assert(n->req() == 5u, "MulAddS2I should have 4 operands.");
Node* in1 = vector_opd(p, 1);
--- a/src/hotspot/share/opto/type.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/type.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1791,6 +1791,7 @@
#define Op_SubX Op_SubL
#define Op_XorX Op_XorL
#define Op_URShiftX Op_URShiftL
+#define Op_LoadX Op_LoadL
// conversions
#define ConvI2X(x) ConvI2L(x)
#define ConvL2X(x) (x)
@@ -1838,6 +1839,7 @@
#define Op_SubX Op_SubI
#define Op_XorX Op_XorI
#define Op_URShiftX Op_URShiftI
+#define Op_LoadX Op_LoadI
// conversions
#define ConvI2X(x) (x)
#define ConvL2X(x) ConvL2I(x)
--- a/src/hotspot/share/opto/vectornode.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/vectornode.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -128,6 +128,9 @@
case Op_NegD:
assert(bt == T_DOUBLE, "must be");
return Op_NegVD;
+ case Op_RoundDoubleMode:
+ assert(bt == T_DOUBLE, "must be");
+ return Op_RoundDoubleModeV;
case Op_SqrtF:
assert(bt == T_FLOAT, "must be");
return Op_SqrtVF;
@@ -259,6 +262,13 @@
return false;
}
+bool VectorNode::is_roundopD(Node *n) {
+ if (n->Opcode() == Op_RoundDoubleMode) {
+ return true;
+ }
+ return false;
+}
+
bool VectorNode::is_shift(Node* n) {
switch (n->Opcode()) {
case Op_LShiftI:
@@ -407,6 +417,8 @@
case Op_MinV: return new MinVNode(n1, n2, vt);
case Op_MaxV: return new MaxVNode(n1, n2, vt);
+ case Op_RoundDoubleModeV: return new RoundDoubleModeVNode(n1, n2, vt);
+
case Op_MulAddVS2VI: return new MulAddVS2VINode(n1, n2, vt);
default:
fatal("Missed vector creation for '%s'", NodeClassNames[vopc]);
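
The superword.cpp and vectornode.cpp hunks above teach auto-vectorization about RoundDoubleMode: the scalar opcode maps to RoundDoubleModeV, and the second operand (a constant rounding mode) is passed through unchanged rather than widened into a vector. A small sketch of that opcode-mapping idea, using made-up enums rather than the real opcode tables:

    #include <cassert>
    #include <cstdio>

    enum ScalarOp { Op_NegD, Op_SqrtD, Op_RoundDoubleMode };          // illustrative subset
    enum VectorOp { Op_NegVD, Op_SqrtVD, Op_RoundDoubleModeV, Op_None };

    static VectorOp vector_opcode(ScalarOp op) {
      switch (op) {
        case Op_NegD:            return Op_NegVD;
        case Op_SqrtD:           return Op_SqrtVD;
        case Op_RoundDoubleMode: return Op_RoundDoubleModeV;          // new mapping in the patch
      }
      return Op_None;
    }

    int main() {
      // The rounding-mode operand stays a scalar constant; only the data operand is packed.
      int rounding_mode = 0;   // value is illustrative
      assert(vector_opcode(Op_RoundDoubleMode) == Op_RoundDoubleModeV);
      std::printf("vector op id: %d (mode %d kept scalar)\n",
                  (int)vector_opcode(Op_RoundDoubleMode), rounding_mode);
      return 0;
    }
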
--- a/src/hotspot/share/opto/vectornode.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/opto/vectornode.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -70,6 +70,7 @@
static bool is_type_transition_short_to_int(Node* n);
static bool is_type_transition_to_int(Node* n);
static bool is_muladds2i(Node* n);
+ static bool is_roundopD(Node * n);
static bool is_invariant_vector(Node* n);
// [Start, end) half-open range defining which operands are vectors
static void vector_operands(Node* n, uint* start, uint* end);
@@ -447,6 +448,13 @@
SqrtVFNode(Node* in, const TypeVect* vt) : VectorNode(in,vt) {}
virtual int Opcode() const;
};
+//------------------------------RoundDoubleVNode--------------------------------
+// Vector round double
+class RoundDoubleModeVNode : public VectorNode {
+ public:
+ RoundDoubleModeVNode(Node* in1, Node* in2, const TypeVect* vt) : VectorNode(in1, in2, vt) {}
+ virtual int Opcode() const;
+};
//------------------------------SqrtVDNode--------------------------------------
// Vector Sqrt double
--- a/src/hotspot/share/prims/jni.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jni.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -65,7 +65,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -88,6 +87,9 @@
#include "utilities/histogram.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmciCompiler.hpp"
+#endif
static jint CurrentVersion = JNI_VERSION_10;
--- a/src/hotspot/share/prims/jniCheck.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jniCheck.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -448,16 +448,16 @@
Method* jniCheck::validate_jmethod_id(JavaThread* thr, jmethodID method_id) {
ASSERT_OOPS_ALLOWED;
// do the fast jmethodID check first
- Method* moop = Method::checked_resolve_jmethod_id(method_id);
- if (moop == NULL) {
+ Method* m = Method::checked_resolve_jmethod_id(method_id);
+ if (m == NULL) {
ReportJNIFatalError(thr, fatal_wrong_class_or_method);
}
- // jmethodIDs are supposed to be weak handles in the class loader data,
+ // jmethodIDs are handles in the class loader data,
// but that can be expensive so check it last
else if (!Method::is_method_id(method_id)) {
ReportJNIFatalError(thr, fatal_non_weak_method);
}
- return moop;
+ return m;
}
@@ -518,18 +518,29 @@
}
}
-void jniCheck::validate_call_object(JavaThread* thr, jobject obj, jmethodID method_id) {
- /* validate the object being passed */
+void jniCheck::validate_call(JavaThread* thr, jclass clazz, jmethodID method_id, jobject obj) {
ASSERT_OOPS_ALLOWED;
- jniCheck::validate_jmethod_id(thr, method_id);
- jniCheck::validate_object(thr, obj);
-}
+ Method* m = jniCheck::validate_jmethod_id(thr, method_id);
+ InstanceKlass* holder = m->method_holder();
+
+ if (clazz != NULL) {
+ Klass* k = jniCheck::validate_class(thr, clazz, false);
+ // Check that method is in the class, must be InstanceKlass
+ if (!InstanceKlass::cast(k)->is_subtype_of(holder)) {
+ ReportJNIFatalError(thr, fatal_wrong_class_or_method);
+ }
+ }
-void jniCheck::validate_call_class(JavaThread* thr, jclass clazz, jmethodID method_id) {
- /* validate the class being passed */
- ASSERT_OOPS_ALLOWED;
- jniCheck::validate_jmethod_id(thr, method_id);
- jniCheck::validate_class(thr, clazz, false);
+ if (obj != NULL) {
+ oop recv = jniCheck::validate_object(thr, obj);
+ assert(recv != NULL, "validate_object checks that");
+ Klass* ik = recv->klass();
+
+ // Check that the object is a subtype of method holder too.
+ if (!InstanceKlass::cast(ik)->is_subtype_of(holder)) {
+ ReportJNIFatalError(thr, fatal_wrong_class_or_method);
+ }
+ }
}
@@ -595,8 +606,7 @@
jboolean isStatic))
functionEnter(thr);
IN_VM(
- jniCheck::validate_class(thr, cls, false);
- jniCheck::validate_jmethod_id(thr, methodID);
+ jniCheck::validate_call(thr, cls, methodID);
)
jobject result = UNCHECKED()->ToReflectedMethod(env, cls, methodID,
isStatic);
@@ -852,8 +862,7 @@
functionEnter(thr);
va_list args;
IN_VM(
- jniCheck::validate_class(thr, clazz, false);
- jniCheck::validate_jmethod_id(thr, methodID);
+ jniCheck::validate_call(thr, clazz, methodID);
)
va_start(args, methodID);
jobject result = UNCHECKED()->NewObjectV(env,clazz,methodID,args);
@@ -869,8 +878,7 @@
va_list args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_class(thr, clazz, false);
- jniCheck::validate_jmethod_id(thr, methodID);
+ jniCheck::validate_call(thr, clazz, methodID);
)
jobject result = UNCHECKED()->NewObjectV(env,clazz,methodID,args);
functionExit(thr);
@@ -884,8 +892,7 @@
const jvalue *args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_class(thr, clazz, false);
- jniCheck::validate_jmethod_id(thr, methodID);
+ jniCheck::validate_call(thr, clazz, methodID);
)
jobject result = UNCHECKED()->NewObjectA(env,clazz,methodID,args);
functionExit(thr);
@@ -941,7 +948,7 @@
functionEnter(thr); \
va_list args; \
IN_VM( \
- jniCheck::validate_call_object(thr, obj, methodID); \
+ jniCheck::validate_call(thr, NULL, methodID, obj); \
) \
va_start(args,methodID); \
ResultType result =UNCHECKED()->Call##Result##MethodV(env, obj, methodID, \
@@ -959,7 +966,7 @@
va_list args)) \
functionEnter(thr); \
IN_VM(\
- jniCheck::validate_call_object(thr, obj, methodID); \
+ jniCheck::validate_call(thr, NULL, methodID, obj); \
) \
ResultType result = UNCHECKED()->Call##Result##MethodV(env, obj, methodID,\
args); \
@@ -975,7 +982,7 @@
const jvalue * args)) \
functionEnter(thr); \
IN_VM( \
- jniCheck::validate_call_object(thr, obj, methodID); \
+ jniCheck::validate_call(thr, NULL, methodID, obj); \
) \
ResultType result = UNCHECKED()->Call##Result##MethodA(env, obj, methodID,\
args); \
@@ -1002,7 +1009,7 @@
functionEnter(thr);
va_list args;
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
+ jniCheck::validate_call(thr, NULL, methodID, obj);
)
va_start(args,methodID);
UNCHECKED()->CallVoidMethodV(env,obj,methodID,args);
@@ -1018,7 +1025,7 @@
va_list args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
+ jniCheck::validate_call(thr, NULL, methodID, obj);
)
UNCHECKED()->CallVoidMethodV(env,obj,methodID,args);
thr->set_pending_jni_exception_check("CallVoidMethodV");
@@ -1032,7 +1039,7 @@
const jvalue * args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
+ jniCheck::validate_call(thr, NULL, methodID, obj);
)
UNCHECKED()->CallVoidMethodA(env,obj,methodID,args);
thr->set_pending_jni_exception_check("CallVoidMethodA");
@@ -1049,8 +1056,7 @@
functionEnter(thr); \
va_list args; \
IN_VM( \
- jniCheck::validate_call_object(thr, obj, methodID); \
- jniCheck::validate_call_class(thr, clazz, methodID); \
+ jniCheck::validate_call(thr, clazz, methodID, obj); \
) \
va_start(args,methodID); \
ResultType result = UNCHECKED()->CallNonvirtual##Result##MethodV(env, \
@@ -1072,8 +1078,7 @@
va_list args)) \
functionEnter(thr); \
IN_VM( \
- jniCheck::validate_call_object(thr, obj, methodID); \
- jniCheck::validate_call_class(thr, clazz, methodID); \
+ jniCheck::validate_call(thr, clazz, methodID, obj); \
) \
ResultType result = UNCHECKED()->CallNonvirtual##Result##MethodV(env, \
obj, \
@@ -1093,8 +1098,7 @@
const jvalue * args)) \
functionEnter(thr); \
IN_VM( \
- jniCheck::validate_call_object(thr, obj, methodID); \
- jniCheck::validate_call_class(thr, clazz, methodID); \
+ jniCheck::validate_call(thr, clazz, methodID, obj); \
) \
ResultType result = UNCHECKED()->CallNonvirtual##Result##MethodA(env, \
obj, \
@@ -1125,8 +1129,7 @@
functionEnter(thr);
va_list args;
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
- jniCheck::validate_call_class(thr, clazz, methodID);
+ jniCheck::validate_call(thr, clazz, methodID, obj);
)
va_start(args,methodID);
UNCHECKED()->CallNonvirtualVoidMethodV(env,obj,clazz,methodID,args);
@@ -1143,8 +1146,7 @@
va_list args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
- jniCheck::validate_call_class(thr, clazz, methodID);
+ jniCheck::validate_call(thr, clazz, methodID, obj);
)
UNCHECKED()->CallNonvirtualVoidMethodV(env,obj,clazz,methodID,args);
thr->set_pending_jni_exception_check("CallNonvirtualVoidMethodV");
@@ -1159,8 +1161,7 @@
const jvalue * args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_call_object(thr, obj, methodID);
- jniCheck::validate_call_class(thr, clazz, methodID);
+ jniCheck::validate_call(thr, clazz, methodID, obj);
)
UNCHECKED()->CallNonvirtualVoidMethodA(env,obj,clazz,methodID,args);
thr->set_pending_jni_exception_check("CallNonvirtualVoidMethodA");
@@ -1253,8 +1254,7 @@
functionEnter(thr); \
va_list args; \
IN_VM( \
- jniCheck::validate_jmethod_id(thr, methodID); \
- jniCheck::validate_class(thr, clazz, false); \
+ jniCheck::validate_call(thr, clazz, methodID); \
) \
va_start(args,methodID); \
ReturnType result = UNCHECKED()->CallStatic##Result##MethodV(env, \
@@ -1274,8 +1274,7 @@
va_list args)) \
functionEnter(thr); \
IN_VM( \
- jniCheck::validate_jmethod_id(thr, methodID); \
- jniCheck::validate_class(thr, clazz, false); \
+ jniCheck::validate_call(thr, clazz, methodID); \
) \
ReturnType result = UNCHECKED()->CallStatic##Result##MethodV(env, \
clazz, \
@@ -1293,8 +1292,7 @@
const jvalue *args)) \
functionEnter(thr); \
IN_VM( \
- jniCheck::validate_jmethod_id(thr, methodID); \
- jniCheck::validate_class(thr, clazz, false); \
+ jniCheck::validate_call(thr, clazz, methodID); \
) \
ReturnType result = UNCHECKED()->CallStatic##Result##MethodA(env, \
clazz, \
@@ -1323,8 +1321,7 @@
functionEnter(thr);
va_list args;
IN_VM(
- jniCheck::validate_jmethod_id(thr, methodID);
- jniCheck::validate_class(thr, cls, false);
+ jniCheck::validate_call(thr, cls, methodID);
)
va_start(args,methodID);
UNCHECKED()->CallStaticVoidMethodV(env,cls,methodID,args);
@@ -1340,8 +1337,7 @@
va_list args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_jmethod_id(thr, methodID);
- jniCheck::validate_class(thr, cls, false);
+ jniCheck::validate_call(thr, cls, methodID);
)
UNCHECKED()->CallStaticVoidMethodV(env,cls,methodID,args);
thr->set_pending_jni_exception_check("CallStaticVoidMethodV");
@@ -1355,8 +1351,7 @@
const jvalue * args))
functionEnter(thr);
IN_VM(
- jniCheck::validate_jmethod_id(thr, methodID);
- jniCheck::validate_class(thr, cls, false);
+ jniCheck::validate_call(thr, cls, methodID);
)
UNCHECKED()->CallStaticVoidMethodA(env,cls,methodID,args);
thr->set_pending_jni_exception_check("CallStaticVoidMethodA");
--- a/src/hotspot/share/prims/jniCheck.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jniCheck.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -51,8 +51,7 @@
static Klass* validate_class(JavaThread* thr, jclass clazz, bool allow_primitive = false);
static void validate_class_descriptor(JavaThread* thr, const char* name);
static void validate_throwable_klass(JavaThread* thr, Klass* klass);
- static void validate_call_object(JavaThread* thr, jobject obj, jmethodID method_id);
- static void validate_call_class(JavaThread* thr, jclass clazz, jmethodID method_id);
+ static void validate_call(JavaThread* thr, jclass clazz, jmethodID method_id, jobject obj = NULL);
static Method* validate_jmethod_id(JavaThread* thr, jmethodID method_id);
};
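
The jniCheck changes above merge the old validate_call_object/validate_call_class pair into a single validate_call() that also verifies the class argument (when given) and the receiver (when given) are subtypes of the method's holder. A stand-alone toy version of that check, not the JNI checker's real types:

    #include <cstdio>

    struct Klass {
      const Klass* super = nullptr;
      bool is_subtype_of(const Klass* k) const {
        for (const Klass* c = this; c != nullptr; c = c->super)
          if (c == k) return true;
        return false;
      }
    };

    struct Method { const Klass* holder; };

    static bool validate_call(const Method& m, const Klass* clazz, const Klass* receiver) {
      if (clazz != nullptr && !clazz->is_subtype_of(m.holder)) return false;      // wrong class
      if (receiver != nullptr && !receiver->is_subtype_of(m.holder)) return false; // wrong object
      return true;
    }

    int main() {
      Klass object_k;                         // java.lang.Object stand-in
      Klass string_k; string_k.super = &object_k;
      Method to_string{ &object_k };
      std::printf("ok=%d\n", validate_call(to_string, &string_k, &string_k));     // prints ok=1
      return 0;
    }
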
--- a/src/hotspot/share/prims/jvm.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvm.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -990,13 +990,21 @@
ResourceMark rm(THREAD);
Handle h_name (THREAD, JNIHandles::resolve_non_null(name));
- Handle string = java_lang_String::internalize_classname(h_name, CHECK_NULL);
-
- const char* str = java_lang_String::as_utf8_string(string());
+ char* str = java_lang_String::as_utf8_string(h_name());
+
// Sanity check, don't expect null
if (str == NULL) return NULL;
- const int str_len = (int)strlen(str);
+ // Internalize the string, converting '.' to '/' in string.
+ char* p = (char*)str;
+ while (*p != '\0') {
+ if (*p == '.') {
+ *p = '/';
+ }
+ p++;
+ }
+
+ const int str_len = (int)(p - str);
if (str_len > Symbol::max_length()) {
// It's impossible to create this class; the name cannot fit
// into the constant pool.
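
The jvm.cpp hunk above drops the intermediate String and instead internalizes the UTF-8 class name in place, converting '.' to '/' and deriving the length from the same scan. A minimal stand-alone illustration of that step:

    #include <cstdio>

    // Replace '.' with '/' in place and return the length found in the same pass,
    // mirroring how the patch derives str_len from p - str.
    static int internalize(char* name) {
      char* p = name;
      for (; *p != '\0'; p++) {
        if (*p == '.') *p = '/';
      }
      return (int)(p - name);
    }

    int main() {
      char name[] = "java.lang.String";
      int len = internalize(name);
      std::printf("%s (%d chars)\n", name, len);   // prints java/lang/String (16 chars)
      return 0;
    }
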
--- a/src/hotspot/share/prims/jvmtiEnv.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -3229,23 +3229,23 @@
jvmtiError
JvmtiEnv::DestroyRawMonitor(JvmtiRawMonitor * rmonitor) {
if (Threads::number_of_threads() == 0) {
- // Remove this monitor from pending raw monitors list
+ // Remove this monitor from pending raw monitors list
// if it has entered in onload or start phase.
JvmtiPendingMonitors::destroy(rmonitor);
} else {
Thread* thread = Thread::current();
- if (rmonitor->is_entered(thread)) {
+ if (rmonitor->owner() == thread) {
// The caller owns this monitor which we are about to destroy.
// We exit the underlying synchronization object so that the
// "delete monitor" call below can work without an assertion
// failure on systems that don't like destroying synchronization
// objects that are locked.
int r;
- intptr_t recursion = rmonitor->recursions();
- for (intptr_t i = 0; i <= recursion; i++) {
+ int recursion = rmonitor->recursions();
+ for (int i = 0; i <= recursion; i++) {
r = rmonitor->raw_exit(thread);
- assert(r == ObjectMonitor::OM_OK, "raw_exit should have worked");
- if (r != ObjectMonitor::OM_OK) { // robustness
+ assert(r == JvmtiRawMonitor::M_OK, "raw_exit should have worked");
+ if (r != JvmtiRawMonitor::M_OK) { // robustness
return JVMTI_ERROR_INTERNAL;
}
}
@@ -3271,7 +3271,7 @@
jvmtiError
JvmtiEnv::RawMonitorEnter(JvmtiRawMonitor * rmonitor) {
if (Threads::number_of_threads() == 0) {
- // No JavaThreads exist so ObjectMonitor enter cannot be
+ // No JavaThreads exist so JvmtiRawMonitor enter cannot be
// used, add this raw monitor to the pending list.
// The pending monitors will be actually entered when
// the VM is setup.
@@ -3279,20 +3279,10 @@
// in thread.cpp.
JvmtiPendingMonitors::enter(rmonitor);
} else {
- int r = 0;
Thread* thread = Thread::current();
-
if (thread->is_Java_thread()) {
JavaThread* current_thread = (JavaThread*)thread;
-#ifdef PROPER_TRANSITIONS
- // Not really unknown but ThreadInVMfromNative does more than we want
- ThreadInVMfromUnknown __tiv;
- {
- ThreadBlockInVM __tbivm(current_thread);
- r = rmonitor->raw_enter(current_thread);
- }
-#else
/* Transition to thread_blocked without entering vm state */
/* This is really evil. Normally you can't undo _thread_blocked */
/* transitions like this because it would cause us to miss a */
@@ -3308,22 +3298,11 @@
current_thread->frame_anchor()->walkable(), "Must be walkable");
current_thread->set_thread_state(_thread_blocked);
- r = rmonitor->raw_enter(current_thread);
+ rmonitor->raw_enter(current_thread);
// restore state, still at a safepoint safe state
current_thread->set_thread_state(state);
-
-#endif /* PROPER_TRANSITIONS */
- assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
} else {
- if (thread->is_Named_thread()) {
- r = rmonitor->raw_enter(thread);
- } else {
- ShouldNotReachHere();
- }
- }
-
- if (r != ObjectMonitor::OM_OK) { // robustness
- return JVMTI_ERROR_INTERNAL;
+ rmonitor->raw_enter(thread);
}
}
return JVMTI_ERROR_NONE;
@@ -3342,31 +3321,10 @@
err = JVMTI_ERROR_NOT_MONITOR_OWNER;
}
} else {
- int r = 0;
Thread* thread = Thread::current();
-
- if (thread->is_Java_thread()) {
- JavaThread* current_thread = (JavaThread*)thread;
-#ifdef PROPER_TRANSITIONS
- // Not really unknown but ThreadInVMfromNative does more than we want
- ThreadInVMfromUnknown __tiv;
-#endif /* PROPER_TRANSITIONS */
- r = rmonitor->raw_exit(current_thread);
- } else {
- if (thread->is_Named_thread()) {
- r = rmonitor->raw_exit(thread);
- } else {
- ShouldNotReachHere();
- }
- }
-
- if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) {
+ int r = rmonitor->raw_exit(thread);
+ if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) {
err = JVMTI_ERROR_NOT_MONITOR_OWNER;
- } else {
- assert(r == ObjectMonitor::OM_OK, "raw_exit should have worked");
- if (r != ObjectMonitor::OM_OK) { // robustness
- err = JVMTI_ERROR_INTERNAL;
- }
}
}
return err;
@@ -3381,14 +3339,7 @@
if (thread->is_Java_thread()) {
JavaThread* current_thread = (JavaThread*)thread;
-#ifdef PROPER_TRANSITIONS
- // Not really unknown but ThreadInVMfromNative does more than we want
- ThreadInVMfromUnknown __tiv;
- {
- ThreadBlockInVM __tbivm(current_thread);
- r = rmonitor->raw_wait(millis, true, current_thread);
- }
-#else
+
/* Transition to thread_blocked without entering vm state */
/* This is really evil. Normally you can't undo _thread_blocked */
/* transitions like this because it would cause us to miss a */
@@ -3408,57 +3359,31 @@
// restore state, still at a safepoint safe state
current_thread->set_thread_state(state);
-#endif /* PROPER_TRANSITIONS */
} else {
- if (thread->is_Named_thread()) {
r = rmonitor->raw_wait(millis, false, thread);
- } else {
- ShouldNotReachHere();
- }
+ assert(r != JvmtiRawMonitor::M_INTERRUPTED, "non-JavaThread can't be interrupted");
}
switch (r) {
- case ObjectMonitor::OM_INTERRUPTED:
+ case JvmtiRawMonitor::M_INTERRUPTED:
return JVMTI_ERROR_INTERRUPT;
- case ObjectMonitor::OM_ILLEGAL_MONITOR_STATE:
+ case JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE:
return JVMTI_ERROR_NOT_MONITOR_OWNER;
+ default:
+ return JVMTI_ERROR_NONE;
}
- assert(r == ObjectMonitor::OM_OK, "raw_wait should have worked");
- if (r != ObjectMonitor::OM_OK) { // robustness
- return JVMTI_ERROR_INTERNAL;
- }
-
- return JVMTI_ERROR_NONE;
} /* end RawMonitorWait */
// rmonitor - pre-checked for validity
jvmtiError
JvmtiEnv::RawMonitorNotify(JvmtiRawMonitor * rmonitor) {
- int r = 0;
Thread* thread = Thread::current();
-
- if (thread->is_Java_thread()) {
- JavaThread* current_thread = (JavaThread*)thread;
- // Not really unknown but ThreadInVMfromNative does more than we want
- ThreadInVMfromUnknown __tiv;
- r = rmonitor->raw_notify(current_thread);
- } else {
- if (thread->is_Named_thread()) {
- r = rmonitor->raw_notify(thread);
- } else {
- ShouldNotReachHere();
- }
- }
-
- if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) {
+ int r = rmonitor->raw_notify(thread);
+
+ if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) {
return JVMTI_ERROR_NOT_MONITOR_OWNER;
}
- assert(r == ObjectMonitor::OM_OK, "raw_notify should have worked");
- if (r != ObjectMonitor::OM_OK) { // robustness
- return JVMTI_ERROR_INTERNAL;
- }
-
return JVMTI_ERROR_NONE;
} /* end RawMonitorNotify */
@@ -3466,29 +3391,12 @@
// rmonitor - pre-checked for validity
jvmtiError
JvmtiEnv::RawMonitorNotifyAll(JvmtiRawMonitor * rmonitor) {
- int r = 0;
Thread* thread = Thread::current();
-
- if (thread->is_Java_thread()) {
- JavaThread* current_thread = (JavaThread*)thread;
- ThreadInVMfromUnknown __tiv;
- r = rmonitor->raw_notifyAll(current_thread);
- } else {
- if (thread->is_Named_thread()) {
- r = rmonitor->raw_notifyAll(thread);
- } else {
- ShouldNotReachHere();
- }
- }
-
- if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) {
+ int r = rmonitor->raw_notifyAll(thread);
+
+ if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) {
return JVMTI_ERROR_NOT_MONITOR_OWNER;
}
- assert(r == ObjectMonitor::OM_OK, "raw_notifyAll should have worked");
- if (r != ObjectMonitor::OM_OK) { // robustness
- return JVMTI_ERROR_INTERNAL;
- }
-
return JVMTI_ERROR_NONE;
} /* end RawMonitorNotifyAll */
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -659,10 +659,9 @@
// thread is not doing an Object.wait() call
mon = java_thread->current_pending_monitor();
if (mon != NULL) {
- // The thread is trying to enter() or raw_enter() an ObjectMonitor.
+ // The thread is trying to enter() an ObjectMonitor.
obj = (oop)mon->object();
- // If obj == NULL, then ObjectMonitor is raw which doesn't count
- // as contended for this API
+ assert(obj != NULL, "ObjectMonitor should have a valid object!");
}
// implied else: no contended ObjectMonitor
} else {
--- a/src/hotspot/share/prims/jvmtiExport.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiExport.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -304,7 +304,7 @@
bool JvmtiExport::_can_modify_any_class = false;
bool JvmtiExport::_can_walk_any_space = false;
-bool JvmtiExport::_has_redefined_a_class = false;
+uint64_t JvmtiExport::_redefinition_count = 0;
bool JvmtiExport::_all_dependencies_are_recorded = false;
//
@@ -1202,6 +1202,7 @@
bool JvmtiExport::_can_post_method_exit = false;
bool JvmtiExport::_can_pop_frame = false;
bool JvmtiExport::_can_force_early_return = false;
+bool JvmtiExport::_can_get_owned_monitor_info = false;
bool JvmtiExport::_early_vmstart_recorded = false;
--- a/src/hotspot/share/prims/jvmtiExport.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiExport.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -91,6 +91,7 @@
JVMTI_SUPPORT_FLAG(can_force_early_return)
JVMTI_SUPPORT_FLAG(early_vmstart_recorded)
+ JVMTI_SUPPORT_FLAG(can_get_owned_monitor_info) // includes can_get_owned_monitor_stack_depth_info
friend class JvmtiEventControllerPrivate; // should only modify these flags
JVMTI_SUPPORT_FLAG(should_post_single_step)
@@ -173,10 +174,10 @@
// one or more classes during the lifetime of the VM. The flag should
// only be set by the friend class and can be queried by other sub
// systems as needed to relax invariant checks.
- static bool _has_redefined_a_class;
+ static uint64_t _redefinition_count;
friend class VM_RedefineClasses;
- inline static void set_has_redefined_a_class() {
- JVMTI_ONLY(_has_redefined_a_class = true;)
+ inline static void increment_redefinition_count() {
+ JVMTI_ONLY(_redefinition_count++;)
}
// Flag to indicate if the compiler has recorded all dependencies. When the
// can_redefine_classes capability is enabled in the OnLoad phase then the compiler
@@ -188,10 +189,16 @@
public:
inline static bool has_redefined_a_class() {
- JVMTI_ONLY(return _has_redefined_a_class);
+ JVMTI_ONLY(return _redefinition_count != 0);
NOT_JVMTI(return false);
}
+ // Only set in safepoint, so no memory ordering needed.
+ inline static uint64_t redefinition_count() {
+ JVMTI_ONLY(return _redefinition_count);
+ NOT_JVMTI(return 0);
+ }
+
inline static bool all_dependencies_are_recorded() {
return _all_dependencies_are_recorded;
}
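// A minimal sketch (not HotSpot code) of why a 64-bit count is more useful
// than the old boolean: a consumer can snapshot the value and later ask
// whether any redefinition happened in between, not merely whether one has
// ever happened. The struct below is illustrative; in the VM the value would
// come from JvmtiExport::redefinition_count().
#include <cstdint>

struct RedefinitionTracker {
  uint64_t _last_seen = 0;

  // Returns true if the count advanced since the previous call.
  bool changed_since_last_check(uint64_t current_count) {
    bool changed = (current_count != _last_seen);
    _last_seen = current_count;
    return changed;
  }
};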
--- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -367,6 +367,8 @@
JvmtiExport::set_can_pop_frame(avail.can_pop_frame);
JvmtiExport::set_can_force_early_return(avail.can_force_early_return);
JvmtiExport::set_should_clean_up_heap_objects(avail.can_generate_breakpoint_events);
+ JvmtiExport::set_can_get_owned_monitor_info(avail.can_get_owned_monitor_info ||
+ avail.can_get_owned_monitor_stack_depth_info);
}
#ifndef PRODUCT
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -30,21 +30,23 @@
#include "runtime/orderAccess.hpp"
#include "runtime/thread.inline.hpp"
-GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1,true);
+JvmtiRawMonitor::QNode::QNode(Thread* thread) : _next(NULL), _prev(NULL),
+ _event(thread->_ParkEvent),
+ _notified(0), _t_state(TS_RUN) {
+}
+
+GrowableArray<JvmtiRawMonitor*>* JvmtiPendingMonitors::_monitors =
+ new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1, true);
void JvmtiPendingMonitors::transition_raw_monitors() {
assert((Threads::number_of_threads()==1),
- "Java thread has not created yet or more than one java thread \
-is running. Raw monitor transition will not work");
- JavaThread *current_java_thread = JavaThread::current();
+ "Java thread has not been created yet or more than one java thread "
+ "is running. Raw monitor transition will not work");
+ JavaThread* current_java_thread = JavaThread::current();
assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
- {
- ThreadBlockInVM __tbivm(current_java_thread);
- for(int i=0; i< count(); i++) {
- JvmtiRawMonitor *rmonitor = monitors()->at(i);
- int r = rmonitor->raw_enter(current_java_thread);
- assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
- }
+ for (int i = 0; i < count(); i++) {
+ JvmtiRawMonitor* rmonitor = monitors()->at(i);
+ rmonitor->raw_enter(current_java_thread);
}
// pending monitors are converted to real monitor so delete them all.
dispose();
@@ -54,13 +56,16 @@
// class JvmtiRawMonitor
//
-JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
+JvmtiRawMonitor::JvmtiRawMonitor(const char* name) : _owner(NULL),
+ _recursions(0),
+ _entry_list(NULL),
+ _wait_set(NULL),
+ _waiters(0),
+ _magic(JVMTI_RM_MAGIC),
+ _name(NULL) {
#ifdef ASSERT
_name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal), name);
-#else
- _name = NULL;
#endif
- _magic = JVMTI_RM_MAGIC;
}
JvmtiRawMonitor::~JvmtiRawMonitor() {
@@ -100,181 +105,179 @@
}
// -------------------------------------------------------------------------
-// The raw monitor subsystem is entirely distinct from normal
-// java-synchronization or jni-synchronization. raw monitors are not
+// The JVMTI raw monitor subsystem is entirely distinct from normal
+// java-synchronization or jni-synchronization. JVMTI raw monitors are not
// associated with objects. They can be implemented in any manner
// that makes sense. The original implementors decided to piggy-back
-// the raw-monitor implementation on the existing Java objectMonitor mechanism.
-// This flaw needs to fixed. We should reimplement raw monitors as sui-generis.
-// Specifically, we should not implement raw monitors via java monitors.
-// Time permitting, we should disentangle and deconvolve the two implementations
-// and move the resulting raw monitor implementation over to the JVMTI directories.
-// Ideally, the raw monitor implementation would be built on top of
-// park-unpark and nothing else.
-//
-// raw monitors are used mainly by JVMTI
-// The raw monitor implementation borrows the ObjectMonitor structure,
-// but the operators are degenerate and extremely simple.
-//
-// Mixed use of a single objectMonitor instance -- as both a raw monitor
-// and a normal java monitor -- is not permissible.
+// the raw-monitor implementation on the existing Java ObjectMonitor mechanism.
+// Now we just use a simplified form of that ObjectMonitor code.
//
// Note that we use the single RawMonitor_lock to protect queue operations for
// _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
-// is deprecated and rare, this is not of concern. The RawMonitor_lock can not
+// is fairly rare, this is not of concern. The RawMonitor_lock can not
// be held indefinitely. The critical sections must be short and bounded.
//
// -------------------------------------------------------------------------
-int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
+void JvmtiRawMonitor::simple_enter(Thread* self) {
for (;;) {
- if (Atomic::replace_if_null(Self, &_owner)) {
- return OS_OK ;
+ if (Atomic::replace_if_null(self, &_owner)) {
+ return;
}
- ObjectWaiter Node (Self) ;
- Self->_ParkEvent->reset() ; // strictly optional
- Node.TState = ObjectWaiter::TS_ENTER ;
+ QNode node(self);
+ self->_ParkEvent->reset(); // strictly optional
+ node._t_state = QNode::TS_ENTER;
- RawMonitor_lock->lock_without_safepoint_check() ;
- Node._next = _EntryList ;
- _EntryList = &Node ;
- OrderAccess::fence() ;
- if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
- _EntryList = Node._next ;
- RawMonitor_lock->unlock() ;
- return OS_OK ;
+ RawMonitor_lock->lock_without_safepoint_check();
+ node._next = _entry_list;
+ _entry_list = &node;
+ OrderAccess::fence();
+ if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) {
+ _entry_list = node._next;
+ RawMonitor_lock->unlock();
+ return;
}
- RawMonitor_lock->unlock() ;
- while (Node.TState == ObjectWaiter::TS_ENTER) {
- Self->_ParkEvent->park() ;
+ RawMonitor_lock->unlock();
+ while (node._t_state == QNode::TS_ENTER) {
+ self->_ParkEvent->park();
}
}
}
-int JvmtiRawMonitor::SimpleExit (Thread * Self) {
- guarantee (_owner == Self, "invariant") ;
- OrderAccess::release_store(&_owner, (void*)NULL) ;
- OrderAccess::fence() ;
- if (_EntryList == NULL) return OS_OK ;
- ObjectWaiter * w ;
+void JvmtiRawMonitor::simple_exit(Thread* self) {
+ guarantee(_owner == self, "invariant");
+ OrderAccess::release_store(&_owner, (Thread*)NULL);
+ OrderAccess::fence();
+ if (_entry_list == NULL) {
+ return;
+ }
- RawMonitor_lock->lock_without_safepoint_check() ;
- w = _EntryList ;
+ RawMonitor_lock->lock_without_safepoint_check();
+ QNode* w = _entry_list;
if (w != NULL) {
- _EntryList = w->_next ;
+ _entry_list = w->_next;
}
- RawMonitor_lock->unlock() ;
+ RawMonitor_lock->unlock();
if (w != NULL) {
- guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
- // Once we set TState to TS_RUN the waiting thread can complete
- // SimpleEnter and 'w' is pointing into random stack space. So we have
- // to ensure we extract the ParkEvent (which is in type-stable memory)
- // before we set the state, and then don't access 'w'.
- ParkEvent * ev = w->_event ;
- OrderAccess::loadstore();
- w->TState = ObjectWaiter::TS_RUN ;
- OrderAccess::fence() ;
- ev->unpark() ;
+ guarantee(w->_t_state == QNode::TS_ENTER, "invariant");
+ // Once we set _t_state to TS_RUN the waiting thread can complete
+ // simple_enter and 'w' is pointing into random stack space. So we have
+ // to ensure we extract the ParkEvent (which is in type-stable memory)
+ // before we set the state, and then don't access 'w'.
+ ParkEvent* ev = w->_event;
+ OrderAccess::loadstore();
+ w->_t_state = QNode::TS_RUN;
+ OrderAccess::fence();
+ ev->unpark();
}
- return OS_OK ;
+ return;
}
-int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
- guarantee (_owner == Self , "invariant") ;
- guarantee (_recursions == 0, "invariant") ;
+int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) {
+ guarantee(_owner == self, "invariant");
+ guarantee(_recursions == 0, "invariant");
- ObjectWaiter Node (Self) ;
- Node._notified = 0 ;
- Node.TState = ObjectWaiter::TS_WAIT ;
+ QNode node(self);
+ node._notified = 0;
+ node._t_state = QNode::TS_WAIT;
- RawMonitor_lock->lock_without_safepoint_check() ;
- Node._next = _WaitSet ;
- _WaitSet = &Node ;
- RawMonitor_lock->unlock() ;
+ RawMonitor_lock->lock_without_safepoint_check();
+ node._next = _wait_set;
+ _wait_set = &node;
+ RawMonitor_lock->unlock();
- SimpleExit (Self) ;
- guarantee (_owner != Self, "invariant") ;
+ simple_exit(self);
+ guarantee(_owner != self, "invariant");
- int ret = OS_OK ;
+ int ret = OS_OK;
if (millis <= 0) {
- Self->_ParkEvent->park();
+ self->_ParkEvent->park();
} else {
- ret = Self->_ParkEvent->park(millis);
+ ret = self->_ParkEvent->park(millis);
}
// If thread still resides on the waitset then unlink it.
// Double-checked locking -- the usage is safe in this context
- // as TState is volatile and the lock-unlock operators are
+ // as _t_state is volatile and the lock-unlock operators are
// serializing (barrier-equivalent).
- if (Node.TState == ObjectWaiter::TS_WAIT) {
- RawMonitor_lock->lock_without_safepoint_check() ;
- if (Node.TState == ObjectWaiter::TS_WAIT) {
+ if (node._t_state == QNode::TS_WAIT) {
+ RawMonitor_lock->lock_without_safepoint_check();
+ if (node._t_state == QNode::TS_WAIT) {
// Simple O(n) unlink, but performance isn't critical here.
- ObjectWaiter * p ;
- ObjectWaiter * q = NULL ;
- for (p = _WaitSet ; p != &Node; p = p->_next) {
- q = p ;
+ QNode* p;
+ QNode* q = NULL;
+ for (p = _wait_set; p != &node; p = p->_next) {
+ q = p;
}
- guarantee (p == &Node, "invariant") ;
+ guarantee(p == &node, "invariant");
if (q == NULL) {
- guarantee (p == _WaitSet, "invariant") ;
- _WaitSet = p->_next ;
+ guarantee(p == _wait_set, "invariant");
+ _wait_set = p->_next;
} else {
- guarantee (p == q->_next, "invariant") ;
- q->_next = p->_next ;
+ guarantee(p == q->_next, "invariant");
+ q->_next = p->_next;
}
- Node.TState = ObjectWaiter::TS_RUN ;
+ node._t_state = QNode::TS_RUN;
}
- RawMonitor_lock->unlock() ;
+ RawMonitor_lock->unlock();
}
- guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
- SimpleEnter (Self) ;
+ guarantee(node._t_state == QNode::TS_RUN, "invariant");
+ simple_enter(self);
- guarantee (_owner == Self, "invariant") ;
- guarantee (_recursions == 0, "invariant") ;
- return ret ;
+ guarantee(_owner == self, "invariant");
+ guarantee(_recursions == 0, "invariant");
+ return ret;
}
-int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
- guarantee (_owner == Self, "invariant") ;
- if (_WaitSet == NULL) return OS_OK ;
+void JvmtiRawMonitor::simple_notify(Thread* self, bool all) {
+ guarantee(_owner == self, "invariant");
+ if (_wait_set == NULL) {
+ return;
+ }
// We have two options:
- // A. Transfer the threads from the WaitSet to the EntryList
- // B. Remove the thread from the WaitSet and unpark() it.
+ // A. Transfer the threads from the _wait_set to the _entry_list
+ // B. Remove the thread from the _wait_set and unpark() it.
//
// We use (B), which is crude and results in lots of futile
// context switching. In particular (B) induces lots of contention.
- ParkEvent * ev = NULL ; // consider using a small auto array ...
- RawMonitor_lock->lock_without_safepoint_check() ;
+ ParkEvent* ev = NULL; // consider using a small auto array ...
+ RawMonitor_lock->lock_without_safepoint_check();
for (;;) {
- ObjectWaiter * w = _WaitSet ;
- if (w == NULL) break ;
- _WaitSet = w->_next ;
- if (ev != NULL) { ev->unpark(); ev = NULL; }
- ev = w->_event ;
- OrderAccess::loadstore() ;
- w->TState = ObjectWaiter::TS_RUN ;
- OrderAccess::storeload();
- if (!All) break ;
+ QNode* w = _wait_set;
+ if (w == NULL) break;
+ _wait_set = w->_next;
+ if (ev != NULL) {
+ ev->unpark();
+ ev = NULL;
+ }
+ ev = w->_event;
+ OrderAccess::loadstore();
+ w->_t_state = QNode::TS_RUN;
+ OrderAccess::storeload();
+ if (!all) {
+ break;
+ }
}
- RawMonitor_lock->unlock() ;
- if (ev != NULL) ev->unpark();
- return OS_OK ;
+ RawMonitor_lock->unlock();
+ if (ev != NULL) {
+ ev->unpark();
+ }
+ return;
}
// Any JavaThread will enter here with state _thread_blocked
-int JvmtiRawMonitor::raw_enter(TRAPS) {
- void * Contended ;
-
+void JvmtiRawMonitor::raw_enter(Thread* self) {
+ void* contended;
+ JavaThread* jt = NULL;
// don't enter raw monitor if thread is being externally suspended, it will
// surprise the suspender if a "suspended" thread can still enter monitor
- JavaThread * jt = (JavaThread *)THREAD;
- if (THREAD->is_Java_thread()) {
+ if (self->is_Java_thread()) {
+ jt = (JavaThread*)self;
jt->SR_lock()->lock_without_safepoint_check();
while (jt->is_external_suspend()) {
jt->SR_lock()->unlock();
@@ -282,150 +285,140 @@
jt->SR_lock()->lock_without_safepoint_check();
}
// guarded by SR_lock to avoid racing with new external suspend requests.
- Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
+ contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL);
jt->SR_lock()->unlock();
} else {
- Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL);
+ contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL);
}
- if (Contended == THREAD) {
- _recursions ++ ;
- return OM_OK ;
+ if (contended == self) {
+ _recursions++;
+ return;
}
- if (Contended == NULL) {
- guarantee (_owner == THREAD, "invariant") ;
- guarantee (_recursions == 0, "invariant") ;
- return OM_OK ;
+ if (contended == NULL) {
+ guarantee(_owner == self, "invariant");
+ guarantee(_recursions == 0, "invariant");
+ return;
}
- THREAD->set_current_pending_monitor(this);
-
- if (!THREAD->is_Java_thread()) {
- // No other non-Java threads besides VM thread would acquire
- // a raw monitor.
- assert(THREAD->is_VM_thread(), "must be VM thread");
- SimpleEnter (THREAD) ;
- } else {
- guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
- for (;;) {
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self()
- SimpleEnter (THREAD) ;
-
- // were we externally suspended while we were waiting?
- if (!jt->handle_special_suspend_equivalent_condition()) break ;
+ self->set_current_pending_raw_monitor(this);
- // This thread was externally suspended
- //
- // This logic isn't needed for JVMTI raw monitors,
- // but doesn't hurt just in case the suspend rules change. This
- // logic is needed for the JvmtiRawMonitor.wait() reentry phase.
- // We have reentered the contended monitor, but while we were
- // waiting another thread suspended us. We don't want to reenter
- // the monitor while suspended because that would surprise the
- // thread that suspended us.
- //
- // Drop the lock -
- SimpleExit (THREAD) ;
-
- jt->java_suspend_self();
- }
-
- assert(_owner == THREAD, "Fatal error with monitor owner!");
- assert(_recursions == 0, "Fatal error with monitor recursions!");
- }
+ if (!self->is_Java_thread()) {
+ simple_enter(self);
+ } else {
+ guarantee(jt->thread_state() == _thread_blocked, "invariant");
+ for (;;) {
+ jt->set_suspend_equivalent();
+ // cleared by handle_special_suspend_equivalent_condition() or
+ // java_suspend_self()
+ simple_enter(jt);
- THREAD->set_current_pending_monitor(NULL);
- guarantee (_recursions == 0, "invariant") ;
- return OM_OK;
-}
-
-// Used mainly for JVMTI raw monitor implementation
-// Also used for JvmtiRawMonitor::wait().
-int JvmtiRawMonitor::raw_exit(TRAPS) {
- if (THREAD != _owner) {
- return OM_ILLEGAL_MONITOR_STATE;
- }
- if (_recursions > 0) {
- --_recursions ;
- return OM_OK ;
- }
-
- void * List = _EntryList ;
- SimpleExit (THREAD) ;
+ // were we externally suspended while we were waiting?
+ if (!jt->handle_special_suspend_equivalent_condition()) {
+ break;
+ }
- return OM_OK;
-}
-
-// Used for JVMTI raw monitor implementation.
-// All JavaThreads will enter here with state _thread_blocked
-
-int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
- if (THREAD != _owner) {
- return OM_ILLEGAL_MONITOR_STATE;
- }
+ // This thread was externally suspended
+ // We have reentered the contended monitor, but while we were
+ // waiting another thread suspended us. We don't want to reenter
+ // the monitor while suspended because that would surprise the
+ // thread that suspended us.
+ //
+ // Drop the lock
+ simple_exit(jt);
- // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
- // The caller must be able to tolerate spurious returns from raw_wait().
- THREAD->_ParkEvent->reset() ;
- OrderAccess::fence() ;
-
- // check interrupt event
- if (interruptible) {
- assert(THREAD->is_Java_thread(), "Only JavaThreads can be interruptible");
- JavaThread* jt = (JavaThread*) THREAD;
- if (jt->is_interrupted(true)) {
- return OM_INTERRUPTED;
+ jt->java_suspend_self();
}
}
- intptr_t save = _recursions ;
- _recursions = 0 ;
- _waiters ++ ;
- if (THREAD->is_Java_thread()) {
- guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
- ((JavaThread *)THREAD)->set_suspend_equivalent();
- }
- int rv = SimpleWait (THREAD, millis) ;
- _recursions = save ;
- _waiters -- ;
+ self->set_current_pending_raw_monitor(NULL);
+
+ guarantee(_owner == self, "invariant");
+ guarantee(_recursions == 0, "invariant");
+}
- guarantee (THREAD == _owner, "invariant") ;
- if (THREAD->is_Java_thread()) {
- JavaThread * jSelf = (JavaThread *) THREAD ;
- for (;;) {
- if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
- SimpleExit (THREAD) ;
- jSelf->java_suspend_self();
- SimpleEnter (THREAD) ;
- jSelf->set_suspend_equivalent() ;
- }
+int JvmtiRawMonitor::raw_exit(Thread* self) {
+ if (self != _owner) {
+ return M_ILLEGAL_MONITOR_STATE;
}
- guarantee (THREAD == _owner, "invariant") ;
+ if (_recursions > 0) {
+ _recursions--;
+ } else {
+ simple_exit(self);
+ }
- if (interruptible) {
- JavaThread* jt = (JavaThread*) THREAD;
- if (jt->is_interrupted(true)) {
- return OM_INTERRUPTED;
- }
- }
- return OM_OK ;
+ return M_OK;
}
-int JvmtiRawMonitor::raw_notify(TRAPS) {
- if (THREAD != _owner) {
- return OM_ILLEGAL_MONITOR_STATE;
+// All JavaThreads will enter here with state _thread_blocked
+
+int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) {
+ if (self != _owner) {
+ return M_ILLEGAL_MONITOR_STATE;
+ }
+
+ // To avoid spurious wakeups we reset the parkevent. This is strictly optional.
+ // The caller must be able to tolerate spurious returns from raw_wait().
+ self->_ParkEvent->reset();
+ OrderAccess::fence();
+
+ JavaThread* jt = NULL;
+ // check interrupt event
+ if (interruptible) {
+ assert(self->is_Java_thread(), "Only JavaThreads can be interruptible");
+ jt = (JavaThread*)self;
+ if (jt->is_interrupted(true)) {
+ return M_INTERRUPTED;
+ }
+ } else {
+ assert(!self->is_Java_thread(), "JavaThreads must be interruptible");
}
- SimpleNotify (THREAD, false) ;
- return OM_OK;
+
+ intptr_t save = _recursions;
+ _recursions = 0;
+ _waiters++;
+ if (self->is_Java_thread()) {
+ guarantee(jt->thread_state() == _thread_blocked, "invariant");
+ jt->set_suspend_equivalent();
+ }
+ int rv = simple_wait(self, millis);
+ _recursions = save;
+ _waiters--;
+
+ guarantee(self == _owner, "invariant");
+ if (self->is_Java_thread()) {
+ for (;;) {
+ if (!jt->handle_special_suspend_equivalent_condition()) {
+ break;
+ }
+ simple_exit(jt);
+ jt->java_suspend_self();
+ simple_enter(jt);
+ jt->set_suspend_equivalent();
+ }
+ guarantee(jt == _owner, "invariant");
+ }
+
+ if (interruptible && jt->is_interrupted(true)) {
+ return M_INTERRUPTED;
+ }
+
+ return M_OK;
}
-int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
- if (THREAD != _owner) {
- return OM_ILLEGAL_MONITOR_STATE;
+int JvmtiRawMonitor::raw_notify(Thread* self) {
+ if (self != _owner) {
+ return M_ILLEGAL_MONITOR_STATE;
}
- SimpleNotify (THREAD, true) ;
- return OM_OK;
+ simple_notify(self, false);
+ return M_OK;
}
+
+int JvmtiRawMonitor::raw_notifyAll(Thread* self) {
+ if (self != _owner) {
+ return M_ILLEGAL_MONITOR_STATE;
+ }
+ simple_notify(self, true);
+ return M_OK;
+}
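// Self-contained C++20 sketch (not HotSpot code) of the hand-off subtlety
// called out in simple_exit() above: the queue node lives on the waiter's
// stack, so the waker must copy the thread-owned event pointer out of the
// node *before* publishing the "runnable" state; after that store the waiter
// may return and the node's memory is gone. All names are illustrative.
#include <atomic>
#include <semaphore>

struct WaiterThread {
  std::counting_semaphore<> park_event{0};   // stands in for Thread::_ParkEvent
};

struct QueueNode {                           // stack-allocated by the waiting thread
  WaiterThread* thread;
  std::atomic<int> state{0};                 // 0 = enqueued, 1 = runnable
  QueueNode* next = nullptr;
};

// Waker side: after unlinking 'node' from the entry list (not shown):
inline void wake(QueueNode* node) {
  auto* ev = &node->thread->park_event;            // grab the event before publishing
  node->state.store(1, std::memory_order_release); // waiter may now return
  ev->release();                                   // do not touch 'node' past this point
}

// Waiter side, after linking its stack node into the queue:
inline void wait_until_runnable(QueueNode& node) {
  while (node.state.load(std::memory_order_acquire) == 0) {
    node.thread->park_event.acquire();             // re-check: a permit may be stale
  }
}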
--- a/src/hotspot/share/prims/jvmtiRawMonitor.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -25,7 +25,8 @@
#ifndef SHARE_PRIMS_JVMTIRAWMONITOR_HPP
#define SHARE_PRIMS_JVMTIRAWMONITOR_HPP
-#include "runtime/objectMonitor.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/park.hpp"
#include "utilities/growableArray.hpp"
//
@@ -33,32 +34,70 @@
//
// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
//
-// Wrapper for ObjectMonitor class that saves the Monitor's name
+// A simplified version of the ObjectMonitor code.
//
-class JvmtiRawMonitor : public ObjectMonitor {
-private:
- int _magic;
- char * _name;
+class JvmtiRawMonitor : public CHeapObj<mtSynchronizer> {
+
+ // Helper class to allow Threads to be linked into queues.
+ // This is a stripped down version of ObjectWaiter.
+ class QNode : public StackObj {
+ friend class JvmtiRawMonitor;
+ enum TStates { TS_READY, TS_RUN, TS_WAIT, TS_ENTER };
+ QNode* volatile _next;
+ QNode* volatile _prev;
+ ParkEvent* _event;
+ volatile int _notified;
+ volatile TStates _t_state;
+
+ QNode(Thread* thread);
+ };
+
+ Thread* volatile _owner; // pointer to owning thread
+ volatile int _recursions; // recursion count, 0 for first entry
+ QNode* volatile _entry_list; // Threads blocked on entry or reentry.
+ // The list is actually composed of nodes,
+ // acting as proxies for Threads.
+ QNode* volatile _wait_set; // Threads wait()ing on the monitor
+ volatile jint _waiters; // number of waiting threads
+ int _magic;
+ char* _name;
// JVMTI_RM_MAGIC is set in constructor and unset in destructor.
enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
- int SimpleEnter (Thread * Self) ;
- int SimpleExit (Thread * Self) ;
- int SimpleWait (Thread * Self, jlong millis) ;
- int SimpleNotify (Thread * Self, bool All) ;
+ void simple_enter(Thread* self);
+ void simple_exit(Thread* self);
+ int simple_wait(Thread* self, jlong millis);
+ void simple_notify(Thread* self, bool all);
+
+ public:
-public:
- JvmtiRawMonitor(const char *name);
+ // return codes
+ enum {
+ M_OK, // no error
+ M_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
+ M_INTERRUPTED // Thread.interrupt()
+ };
+
+ // Non-aborting operator new
+ void* operator new(size_t size) throw() {
+ return CHeapObj::operator new(size, std::nothrow);
+ }
+
+ JvmtiRawMonitor(const char* name);
~JvmtiRawMonitor();
- int raw_enter(TRAPS);
- int raw_exit(TRAPS);
- int raw_wait(jlong millis, bool interruptable, TRAPS);
- int raw_notify(TRAPS);
- int raw_notifyAll(TRAPS);
- int magic() { return _magic; }
- const char *get_name() { return _name; }
- bool is_valid();
+
+ Thread* owner() const { return _owner; }
+ void set_owner(Thread* owner) { _owner = owner; }
+ int recursions() const { return _recursions; }
+ void raw_enter(Thread* self);
+ int raw_exit(Thread* self);
+ int raw_wait(jlong millis, bool interruptible, Thread* self);
+ int raw_notify(Thread* self);
+ int raw_notifyAll(Thread* self);
+ int magic() const { return _magic; }
+ const char* get_name() const { return _name; }
+ bool is_valid();
};
// Onload pending raw monitors
@@ -67,8 +106,8 @@
// VM is fully initialized.
class JvmtiPendingMonitors : public AllStatic {
-private:
- static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
+ private:
+ static GrowableArray<JvmtiRawMonitor*>* _monitors; // Cache raw monitor enter
inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
@@ -76,8 +115,8 @@
delete monitors();
}
-public:
- static void enter(JvmtiRawMonitor *monitor) {
+ public:
+ static void enter(JvmtiRawMonitor* monitor) {
monitors()->append(monitor);
}
@@ -85,14 +124,14 @@
return monitors()->length();
}
- static void destroy(JvmtiRawMonitor *monitor) {
+ static void destroy(JvmtiRawMonitor* monitor) {
while (monitors()->contains(monitor)) {
monitors()->remove(monitor);
}
}
// Return false if monitor is not found in the list.
- static bool exit(JvmtiRawMonitor *monitor) {
+ static bool exit(JvmtiRawMonitor* monitor) {
if (monitors()->contains(monitor)) {
monitors()->remove(monitor);
return true;
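// Self-contained sketch (not HotSpot code) of the non-aborting operator new
// idiom declared above: when a class-level operator new is non-throwing and
// returns NULL, the new-expression itself yields NULL and the constructor is
// never run, so the caller can turn allocation failure into an error code
// instead of aborting. std::malloc stands in for the CHeapObj nothrow path.
#include <cstddef>
#include <cstdlib>

class NonAbortingExample {
 public:
  void* operator new(std::size_t size) noexcept { return std::malloc(size); }
  void operator delete(void* p) noexcept { std::free(p); }
};

// Hypothetical call site: the result must be checked before use.
//   NonAbortingExample* m = new NonAbortingExample();
//   if (m == NULL) { /* report an out-of-memory style error to the caller */ }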
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -232,9 +232,9 @@
ResolvedMethodTable::adjust_method_entries(&trace_name_printed);
}
- // Set flag indicating that some invariants are no longer true.
+ // Increment the count indicating that some invariants are no longer true.
// See jvmtiExport.hpp for detailed explanation.
- JvmtiExport::set_has_redefined_a_class();
+ JvmtiExport::increment_redefinition_count();
// check_class() is optionally called for product bits, but is
// always called for non-product bits.
@@ -3528,15 +3528,6 @@
"should be replaced");
}
}
- // Update deleted jmethodID
- for (int j = 0; j < _deleted_methods_length; ++j) {
- Method* old_method = _deleted_methods[j];
- jmethodID jmid = old_method->find_jmethod_id_or_null();
- if (jmid != NULL) {
- // Change the jmethodID to point to NSME.
- Method::change_method_associated_with_jmethod_id(jmid, Universe::throw_no_such_method_error());
- }
- }
}
int VM_RedefineClasses::check_methods_and_mark_as_obsolete() {
--- a/src/hotspot/share/prims/jvmtiThreadState.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/jvmtiThreadState.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/prims/methodHandles.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/methodHandles.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -41,7 +41,6 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/methodHandles.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/prims/whitebox.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -32,6 +32,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/methodMatcher.hpp"
#include "compiler/directivesParser.hpp"
#include "gc/shared/gcConfig.hpp"
@@ -58,7 +59,6 @@
#include "prims/wbtestmethods/parserTests.hpp"
#include "prims/whitebox.inline.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/flags/jvmFlag.hpp"
@@ -1735,16 +1735,30 @@
WB_END
WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
- Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
- Monitor::_safepoint_check_always :
- Monitor::_safepoint_check_never;
- Monitor::SafepointCheckFlag sfpt_check_attempted = attemptedNoSafepointValue ?
- Monitor::_no_safepoint_check_flag :
- Monitor::_safepoint_check_flag;
+ Mutex::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
+ Mutex::_safepoint_check_always :
+ Mutex::_safepoint_check_never;
+ Mutex::SafepointCheckFlag sfpt_check_attempted = attemptedNoSafepointValue ?
+ Mutex::_no_safepoint_check_flag :
+ Mutex::_safepoint_check_flag;
MutexLocker ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", true, sfpt_check_required),
sfpt_check_attempted);
WB_END
+WB_ENTRY(void, WB_AssertSpecialLock(JNIEnv* env, jobject o, jboolean allowVMBlock, jboolean safepointCheck))
+ // Create a special-ranked lock with the requested (possibly rule-violating) settings
+ Mutex::SafepointCheckRequired sfpt_check_required = safepointCheck ?
+ Mutex::_safepoint_check_always :
+ Mutex::_safepoint_check_never;
+ Mutex::SafepointCheckFlag safepoint_check = safepointCheck ?
+ Monitor::_safepoint_check_flag :
+ Monitor::_no_safepoint_check_flag;
+
+ MutexLocker ml(new Mutex(Mutex::special, "SpecialTest_lock", allowVMBlock, sfpt_check_required), safepoint_check);
+ // If the lock above succeeds, try to safepoint to test the NSV implied with this special lock.
+ ThreadBlockInVM tbivm(JavaThread::current());
+WB_END
+
WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
oop obj_oop = JNIHandles::resolve(obj);
return (jboolean) obj_oop->mark().has_monitor();
@@ -2322,6 +2336,7 @@
{CC"AddModuleExportsToAll", CC"(Ljava/lang/Object;Ljava/lang/String;)V",
(void*)&WB_AddModuleExportsToAll },
{CC"assertMatchingSafepointCalls", CC"(ZZ)V", (void*)&WB_AssertMatchingSafepointCalls },
+ {CC"assertSpecialLock", CC"(ZZ)V", (void*)&WB_AssertSpecialLock },
{CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
{CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
{CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool },
--- a/src/hotspot/share/runtime/arguments.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/arguments.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -526,7 +526,6 @@
{ "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "UseMembar", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::undefined() },
- { "CompilationPolicyChoice", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::undefined() },
{ "AllowJNIEnvProxy", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "ThreadLocalHandshakes", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -534,6 +533,7 @@
{ "FieldsAllocationStyle", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
{ "CompactFields", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
{ "MonitorBound", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
+ { "G1RSetScanBlockSize", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -547,6 +547,7 @@
{ "SharedReadOnlySize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
+ { "CompilationPolicyChoice", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "FailOverToOldVerifier", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
{ "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
@@ -1453,7 +1454,7 @@
"--patch-module"
};
void Arguments::check_unsupported_dumping_properties() {
- assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
+ assert(is_dumping_archive(),
"this function is only used with CDS dump time");
assert(ARRAY_SIZE(unsupported_properties) == ARRAY_SIZE(unsupported_options), "must be");
// If a vm option is found in the unsupported_options array, vm will exit with an error message.
@@ -3536,7 +3537,7 @@
SharedArchivePath = get_default_shared_archive_path();
} else {
int archives = num_archives(SharedArchiveFile);
- if (DynamicDumpSharedSpaces || DumpSharedSpaces) {
+ if (is_dumping_archive()) {
if (archives > 1) {
vm_exit_during_initialization(
"Cannot have more than 1 archive file specified in -XX:SharedArchiveFile during CDS dumping");
@@ -3549,7 +3550,7 @@
}
}
}
- if (!DynamicDumpSharedSpaces && !DumpSharedSpaces){
+ if (!is_dumping_archive()){
if (archives > 2) {
vm_exit_during_initialization(
"Cannot have more than 2 archive files specified in the -XX:SharedArchiveFile option");
--- a/src/hotspot/share/runtime/arguments.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/arguments.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -647,6 +647,12 @@
static bool atojulong(const char *s, julong* result);
static bool has_jfr_option() NOT_JFR_RETURN_(false);
+
+ static bool is_dumping_archive() { return DumpSharedSpaces || DynamicDumpSharedSpaces; }
+
+ static void assert_is_dumping_archive() {
+ assert(Arguments::is_dumping_archive(), "dump time only");
+ }
};
// Disable options not supported in this release, with a warning if they
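// Minimal sketch (not HotSpot code) of the refactor applied at the
// arguments.cpp call sites above: spell the "static or dynamic CDS dump"
// test once, so the two flags cannot drift apart across call sites. The
// booleans below stand in for the real VM flags.
#include <cassert>

static bool DumpSharedSpaces        = false;  // set by -Xshare:dump (assumed)
static bool DynamicDumpSharedSpaces = false;  // set for dynamic archiving (assumed)

static bool is_dumping_archive() {
  return DumpSharedSpaces || DynamicDumpSharedSpaces;
}

// Hypothetical caller, mirroring check_unsupported_dumping_properties():
static void require_dump_time() {
  assert(is_dumping_archive() && "only used at CDS dump time");
}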
--- a/src/hotspot/share/runtime/compilationPolicy.cpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,726 +0,0 @@
-/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderDataGraph.inline.hpp"
-#include "code/compiledIC.hpp"
-#include "code/nmethod.hpp"
-#include "code/scopeDesc.hpp"
-#include "interpreter/interpreter.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/nativeLookup.hpp"
-#include "runtime/compilationPolicy.hpp"
-#include "runtime/frame.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/rframe.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/tieredThresholdPolicy.hpp"
-#include "runtime/vframe.hpp"
-#include "runtime/vmOperations.hpp"
-#include "utilities/events.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-#ifdef COMPILER1
-#include "c1/c1_Compiler.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/c2compiler.hpp"
-#endif
-
-CompilationPolicy* CompilationPolicy::_policy;
-
-// Determine compilation policy based on command line argument
-void compilationPolicy_init() {
- switch(CompilationPolicyChoice) {
- case 0:
- CompilationPolicy::set_policy(new SimpleCompPolicy());
- break;
-
- case 1:
-#ifdef COMPILER2
- CompilationPolicy::set_policy(new StackWalkCompPolicy());
-#else
- Unimplemented();
-#endif
- break;
- case 2:
-#ifdef TIERED
- CompilationPolicy::set_policy(new TieredThresholdPolicy());
-#else
- Unimplemented();
-#endif
- break;
- default:
- fatal("CompilationPolicyChoice must be in the range: [0-2]");
- }
- CompilationPolicy::policy()->initialize();
-}
-
-// Returns true if m must be compiled before executing it
-// This is intended to force compiles for methods (usually for
-// debugging) that would otherwise be interpreted for some reason.
-bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
- // Don't allow Xcomp to cause compiles in replay mode
- if (ReplayCompiles) return false;
-
- if (m->has_compiled_code()) return false; // already compiled
- if (!can_be_compiled(m, comp_level)) return false;
-
- return !UseInterpreter || // must compile all methods
- (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
-}
-
-void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
- if (must_be_compiled(selected_method)) {
- // This path is unusual, mostly used by the '-Xcomp' stress test mode.
-
- // Note: with several active threads, the must_be_compiled may be true
- // while can_be_compiled is false; remove assert
- // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
- if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
- // don't force compilation, resolve was on behalf of compiler
- return;
- }
- if (selected_method->method_holder()->is_not_initialized()) {
- // 'is_not_initialized' means not only '!is_initialized', but also that
- // initialization has not been started yet ('!being_initialized')
- // Do not force compilation of methods in uninitialized classes.
- // Note that doing this would throw an assert later,
- // in CompileBroker::compile_method.
- // We sometimes use the link resolver to do reflective lookups
- // even before classes are initialized.
- return;
- }
- CompileBroker::compile_method(selected_method, InvocationEntryBci,
- CompilationPolicy::policy()->initial_compile_level(),
- methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
- }
-}
-
-// Returns true if m is allowed to be compiled
-bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
- // allow any levels for WhiteBox
- assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
-
- if (m->is_abstract()) return false;
- if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
-
- // Math intrinsics should never be compiled as this can lead to
- // monotonicity problems because the interpreter will prefer the
- // compiled code to the intrinsic version. This can't happen in
- // production because the invocation counter can't be incremented
- // but we shouldn't expose the system to this problem in testing
- // modes.
- if (!AbstractInterpreter::can_be_compiled(m)) {
- return false;
- }
- if (comp_level == CompLevel_all) {
- if (TieredCompilation) {
- // enough to be compilable at any level for tiered
- return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
- } else {
- // must be compilable at available level for non-tiered
- return !m->is_not_compilable(CompLevel_highest_tier);
- }
- } else if (is_compile(comp_level)) {
- return !m->is_not_compilable(comp_level);
- }
- return false;
-}
-
-// Returns true if m is allowed to be osr compiled
-bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
- bool result = false;
- if (comp_level == CompLevel_all) {
- if (TieredCompilation) {
- // enough to be osr compilable at any level for tiered
- result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
- } else {
- // must be osr compilable at available level for non-tiered
- result = !m->is_not_osr_compilable(CompLevel_highest_tier);
- }
- } else if (is_compile(comp_level)) {
- result = !m->is_not_osr_compilable(comp_level);
- }
- return (result && can_be_compiled(m, comp_level));
-}
-
-bool CompilationPolicy::is_compilation_enabled() {
- // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
- return CompileBroker::should_compile_new_jobs();
-}
-
-CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
- // Remove unloaded methods from the queue
- for (CompileTask* task = compile_queue->first(); task != NULL; ) {
- CompileTask* next = task->next();
- if (task->is_unloaded()) {
- compile_queue->remove_and_mark_stale(task);
- }
- task = next;
- }
-#if INCLUDE_JVMCI
- if (UseJVMCICompiler && !BackgroundCompilation) {
- /*
- * In blocking compilation mode, the CompileBroker will make
- * compilations submitted by a JVMCI compiler thread non-blocking. These
- * compilations should be scheduled after all blocking compilations
- * to service non-compiler related compilations sooner and reduce the
- * chance of such compilations timing out.
- */
- for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) {
- if (task->is_blocking()) {
- return task;
- }
- }
- }
-#endif
- return compile_queue->first();
-}
-
-#ifndef PRODUCT
-void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
- if (TraceOnStackReplacement) {
- if (osr_nm == NULL) tty->print_cr("compilation failed");
- else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
- }
-}
-#endif // !PRODUCT
-
-void NonTieredCompPolicy::initialize() {
- // Setup the compiler thread numbers
- if (CICompilerCountPerCPU) {
- // Example: if CICompilerCountPerCPU is true, then we get
- // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
- // May help big-app startup time.
- _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1);
- // Make sure there is enough space in the code cache to hold all the compiler buffers
- size_t buffer_size = 1;
-#ifdef COMPILER1
- buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size;
-#endif
-#ifdef COMPILER2
- buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size;
-#endif
- int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
- if (_compiler_count > max_count) {
- // Lower the compiler count such that all buffers fit into the code cache
- _compiler_count = MAX2(max_count, 1);
- }
- FLAG_SET_ERGO(CICompilerCount, _compiler_count);
- } else {
- _compiler_count = CICompilerCount;
- }
-}
-
-// Note: this policy is used ONLY if TieredCompilation is off.
-// compiler_count() behaves the following way:
-// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
-// zero for the c1 compilation levels in server compilation mode runs
-// and c2 compilation levels in client compilation mode runs.
-// - with COMPILER2 not defined it should return zero for c2 compilation levels.
-// - with COMPILER1 not defined it should return zero for c1 compilation levels.
-// - if neither is defined - always return zero.
-int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
- assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
- if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||)
- is_client_compilation_mode_vm() && is_c1_compile(comp_level)) {
- return _compiler_count;
- }
- return 0;
-}
-
-void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
- // Make sure invocation and backedge counter doesn't overflow again right away
- // as would be the case for native methods.
-
- // BUT also make sure the method doesn't look like it was never executed.
- // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
- MethodCounters* mcs = m->method_counters();
- assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
- mcs->invocation_counter()->set_carry();
- mcs->backedge_counter()->set_carry();
-
- assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
-}
-
-void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
- // Delay next back-branch event but pump up invocation counter to trigger
- // whole method compilation.
- MethodCounters* mcs = m->method_counters();
- assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
- InvocationCounter* i = mcs->invocation_counter();
- InvocationCounter* b = mcs->backedge_counter();
-
- // Don't set invocation_counter's value too low otherwise the method will
- // look like immature (ic < ~5300) which prevents the inlining based on
- // the type profiling.
- i->set(i->state(), CompileThreshold);
- // Don't reset counter too low - it is used to check if OSR method is ready.
- b->set(b->state(), CompileThreshold / 2);
-}
-
-//
-// CounterDecay
-//
-// Iterates through invocation counters and decrements them. This
-// is done at each safepoint.
-//
-class CounterDecay : public AllStatic {
- static jlong _last_timestamp;
- static void do_method(Method* m) {
- MethodCounters* mcs = m->method_counters();
- if (mcs != NULL) {
- mcs->invocation_counter()->decay();
- }
- }
-public:
- static void decay();
- static bool is_decay_needed() {
- return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
- }
-};
-
-jlong CounterDecay::_last_timestamp = 0;
-
-void CounterDecay::decay() {
- _last_timestamp = os::javaTimeMillis();
-
- // This operation is going to be performed only at the end of a safepoint
- // and hence GC's will not be going on, all Java mutators are suspended
- // at this point and hence SystemDictionary_lock is also not needed.
- assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
- size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
- size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
- CounterHalfLifeTime);
- for (size_t i = 0; i < classes_per_tick; i++) {
- InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
- if (k != NULL) {
- k->methods_do(do_method);
- }
- }
-}
-
-// Called at the end of the safepoint
-void NonTieredCompPolicy::do_safepoint_work() {
- if(UseCounterDecay && CounterDecay::is_decay_needed()) {
- CounterDecay::decay();
- }
-}
-
-void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
- ScopeDesc* sd = trap_scope;
- MethodCounters* mcs;
- InvocationCounter* c;
- for (; !sd->is_top(); sd = sd->sender()) {
- mcs = sd->method()->method_counters();
- if (mcs != NULL) {
- // Reset ICs of inlined methods, since they can trigger compilations also.
- mcs->invocation_counter()->reset();
- }
- }
- mcs = sd->method()->method_counters();
- if (mcs != NULL) {
- c = mcs->invocation_counter();
- if (is_osr) {
- // It was an OSR method, so bump the count higher.
- c->set(c->state(), CompileThreshold);
- } else {
- c->reset();
- }
- mcs->backedge_counter()->reset();
- }
-}
-
-// This method can be called by any component of the runtime to notify the policy
-// that it's recommended to delay the compilation of this method.
-void NonTieredCompPolicy::delay_compilation(Method* method) {
- MethodCounters* mcs = method->method_counters();
- if (mcs != NULL) {
- mcs->invocation_counter()->decay();
- mcs->backedge_counter()->decay();
- }
-}
-
-void NonTieredCompPolicy::disable_compilation(Method* method) {
- MethodCounters* mcs = method->method_counters();
- if (mcs != NULL) {
- mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
- mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
- }
-}
-
-CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
- return select_task_helper(compile_queue);
-}
-
-bool NonTieredCompPolicy::is_mature(Method* method) {
- MethodData* mdo = method->method_data();
- assert(mdo != NULL, "Should be");
- uint current = mdo->mileage_of(method);
- uint initial = mdo->creation_mileage();
- if (current < initial)
- return true; // some sort of overflow
- uint target;
- if (ProfileMaturityPercentage <= 0)
- target = (uint) -ProfileMaturityPercentage; // absolute value
- else
- target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
- return (current >= initial + target);
-}
-
-nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
- int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
- assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
- NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
- if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
- // If certain JVMTI events (e.g. frame pop event) are requested then the
- // thread is forced to remain in interpreted code. This is
- // implemented partly by a check in the run_compiled_code
- // section of the interpreter whether we should skip running
- // compiled code, and partly by skipping OSR compiles for
- // interpreted-only threads.
- if (bci != InvocationEntryBci) {
- reset_counter_for_back_branch_event(method);
- return NULL;
- }
- }
- if (ReplayCompiles) {
- // Don't trigger other compiles in testing mode
- if (bci == InvocationEntryBci) {
- reset_counter_for_invocation_event(method);
- } else {
- reset_counter_for_back_branch_event(method);
- }
- return NULL;
- }
-
- if (bci == InvocationEntryBci) {
- // when code cache is full, compilation gets switched off, UseCompiler
- // is set to false
- if (!method->has_compiled_code() && UseCompiler) {
- method_invocation_event(method, thread);
- } else {
- // Force counter overflow on method entry, even if no compilation
- // happened. (The method_invocation_event call does this also.)
- reset_counter_for_invocation_event(method);
- }
- // compilation at an invocation overflow no longer goes and retries test for
- // compiled method. We always run the loser of the race as interpreted.
- // so return NULL
- return NULL;
- } else {
- // counter overflow in a loop => try to do on-stack-replacement
- nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
- NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
- // when code cache is full, we should not compile any more...
- if (osr_nm == NULL && UseCompiler) {
- method_back_branch_event(method, bci, thread);
- osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
- }
- if (osr_nm == NULL) {
- reset_counter_for_back_branch_event(method);
- return NULL;
- }
- return osr_nm;
- }
- return NULL;
-}
-
-#ifndef PRODUCT
-void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
- if (TraceInvocationCounterOverflow) {
- MethodCounters* mcs = m->method_counters();
- assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
- InvocationCounter* ic = mcs->invocation_counter();
- InvocationCounter* bc = mcs->backedge_counter();
- ResourceMark rm;
- if (bci == InvocationEntryBci) {
- tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
- } else {
- tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
- }
- m->print_value();
- tty->cr();
- ic->print();
- bc->print();
- if (ProfileInterpreter) {
- if (bci != InvocationEntryBci) {
- MethodData* mdo = m->method_data();
- if (mdo != NULL) {
- ProfileData *pd = mdo->bci_to_data(branch_bci);
- if (pd == NULL) {
- tty->print_cr("back branch count = N/A (missing ProfileData)");
- } else {
- tty->print_cr("back branch count = %d", pd->as_JumpData()->taken());
- }
- }
- }
- }
- }
-}
-
-void NonTieredCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) {
- if (TraceOnStackReplacement) {
- ResourceMark rm;
- tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
- method->print_short_name(tty);
- tty->print_cr(" at bci %d", bci);
- }
-}
-#endif // !PRODUCT
-
-// SimpleCompPolicy - compile current method
-
-void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
- const int comp_level = CompLevel_highest_tier;
- const int hot_count = m->invocation_count();
- reset_counter_for_invocation_event(m);
-
- if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
- CompiledMethod* nm = m->code();
- if (nm == NULL ) {
- CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread);
- }
- }
-}
-
-void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
- const int comp_level = CompLevel_highest_tier;
- const int hot_count = m->backedge_count();
-
- if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
- CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
- NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
- }
-}
-// StackWalkCompPolicy - walk up stack to find a suitable method to compile
-
-#ifdef COMPILER2
-const char* StackWalkCompPolicy::_msg = NULL;
-
-
-// Consider m for compilation
-void StackWalkCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
- const int comp_level = CompLevel_highest_tier;
- const int hot_count = m->invocation_count();
- reset_counter_for_invocation_event(m);
-
- if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
- ResourceMark rm(thread);
- frame fr = thread->last_frame();
- assert(fr.is_interpreted_frame(), "must be interpreted");
- assert(fr.interpreter_frame_method() == m(), "bad method");
-
- RegisterMap reg_map(thread, false);
- javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
- // triggerVF is the frame that triggered its counter
- RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());
-
- if (first->top_method()->code() != NULL) {
- // called obsolete method/nmethod -- no need to recompile
- } else {
- GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
- stack->push(first);
- RFrame* top = findTopInlinableFrame(stack);
- assert(top != NULL, "findTopInlinableFrame returned null");
- CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
- m, hot_count, CompileTask::Reason_InvocationCount, thread);
- }
- }
-}
-
-void StackWalkCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) {
- const int comp_level = CompLevel_highest_tier;
- const int hot_count = m->backedge_count();
-
- if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
- CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread);
- NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
- }
-}
-
-RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
- // go up the stack until finding a frame that (probably) won't be inlined
- // into its caller
- RFrame* current = stack->at(0); // current choice for stopping
- assert( current && !current->is_compiled(), "" );
- const char* msg = NULL;
-
- while (1) {
-
- // before going up the stack further, check if doing so would get us into
- // compiled code
- RFrame* next = senderOf(current, stack);
- if( !next ) // No next frame up the stack?
- break; // Then compile with current frame
-
- Method* m = current->top_method();
- Method* next_m = next->top_method();
-
- if( !Inline ) { // Inlining turned off
- msg = "Inlining turned off";
- break;
- }
- if (next_m->is_not_compilable()) { // Did fail to compile this before/
- msg = "caller not compilable";
- break;
- }
- if (next->num() > MaxRecompilationSearchLength) {
- // don't go up too high when searching for recompilees
- msg = "don't go up any further: > MaxRecompilationSearchLength";
- break;
- }
- if (next->distance() > MaxInterpretedSearchLength) {
- // don't go up too high when searching for recompilees
- msg = "don't go up any further: next > MaxInterpretedSearchLength";
- break;
- }
- // Compiled frame above already decided not to inline;
- // do not recompile him.
- if (next->is_compiled()) {
- msg = "not going up into optimized code";
- break;
- }
-
- // Interpreted frame above us was already compiled. Do not force
- // a recompile, although if the frame above us runs long enough an
- // OSR might still happen.
- if( current->is_interpreted() && next_m->has_compiled_code() ) {
- msg = "not going up -- already compiled caller";
- break;
- }
-
- // Compute how frequent this call site is. We have current method 'm'.
- // We know next method 'next_m' is interpreted. Find the call site and
- // check the various invocation counts.
- int invcnt = 0; // Caller counts
- if (ProfileInterpreter) {
- invcnt = next_m->interpreter_invocation_count();
- }
- int cnt = 0; // Call site counts
- if (ProfileInterpreter && next_m->method_data() != NULL) {
- ResourceMark rm;
- int bci = next->top_vframe()->bci();
- ProfileData* data = next_m->method_data()->bci_to_data(bci);
- if (data != NULL && data->is_CounterData())
- cnt = data->as_CounterData()->count();
- }
-
- // Caller counts / call-site counts; i.e. is this call site
- // a hot call site for method next_m?
- int freq = (invcnt) ? cnt/invcnt : cnt;
-
- // Check size and frequency limits
- if ((msg = shouldInline(m, freq, cnt)) != NULL) {
- break;
- }
- // Check inlining negative tests
- if ((msg = shouldNotInline(m)) != NULL) {
- break;
- }
-
-
- // If the caller method is too big or something then we do not want to
- // compile it just to inline a method
- if (!can_be_compiled(next_m, CompLevel_any)) {
- msg = "caller cannot be compiled";
- break;
- }
-
- if( next_m->name() == vmSymbols::class_initializer_name() ) {
- msg = "do not compile class initializer (OSR ok)";
- break;
- }
-
- current = next;
- }
-
- assert( !current || !current->is_compiled(), "" );
-
- return current;
-}
-
-RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
- RFrame* sender = rf->caller();
- if (sender && sender->num() == stack->length()) stack->push(sender);
- return sender;
-}
-
-
-const char* StackWalkCompPolicy::shouldInline(const methodHandle& m, float freq, int cnt) {
- // Allows targeted inlining
- // positive filter: should send be inlined? returns NULL (--> yes)
- // or rejection msg
- int max_size = MaxInlineSize;
- int cost = m->code_size();
-
- // Check for too many throws (and not too huge)
- if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) {
- return NULL;
- }
-
- // bump the max size if the call is frequent
- if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
- if (TraceFrequencyInlining) {
- tty->print("(Inlined frequent method)\n");
- m->print();
- }
- max_size = FreqInlineSize;
- }
- if (cost > max_size) {
- return (_msg = "too big");
- }
- return NULL;
-}
-
-
-const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
- // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
- if (m->is_abstract()) return (_msg = "abstract method");
- // note: we allow ik->is_abstract()
- if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
- if (m->is_native()) return (_msg = "native method");
- CompiledMethod* m_code = m->code();
- if (m_code != NULL && m_code->code_size() > InlineSmallCode)
- return (_msg = "already compiled into a big method");
-
- // use frequency-based objections only for non-trivial methods
- if (m->code_size() <= MaxTrivialSize) return NULL;
- if (UseInterpreter) { // don't use counts with -Xcomp
- if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
- if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
- }
- if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");
-
- return NULL;
-}
-
-
-
-#endif // COMPILER2
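
For readers tracing this removal: the heart of the inlining filter that StackWalkCompPolicy::shouldInline applied is easy to state in isolation — a callee is normally limited to MaxInlineSize bytecodes, but a frequent call site is allowed up to FreqInlineSize. The sketch below is illustrative only, not HotSpot code; the constants are hypothetical stand-ins for the corresponding -XX flag defaults.

#include <cstddef>

// Returns NULL to mean "inline it", or a rejection message; mirrors the shape
// of the removed shouldInline with made-up default values.
const char* should_inline(int callee_code_size, double call_freq, int call_count) {
  const int kMaxInlineSize        = 35;   // stand-in for -XX:MaxInlineSize
  const int kFreqInlineSize       = 325;  // stand-in for -XX:FreqInlineSize
  const int kInlineFrequencyRatio = 20;   // stand-in for -XX:InlineFrequencyRatio
  const int kInlineFrequencyCount = 100;  // stand-in for -XX:InlineFrequencyCount

  int max_size = kMaxInlineSize;
  // A hot call site earns the larger size budget.
  if (call_freq >= kInlineFrequencyRatio || call_count >= kInlineFrequencyCount) {
    max_size = kFreqInlineSize;
  }
  return (callee_code_size > max_size) ? "too big" : NULL;
}
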
--- a/src/hotspot/share/runtime/compilationPolicy.hpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_RUNTIME_COMPILATIONPOLICY_HPP
-#define SHARE_RUNTIME_COMPILATIONPOLICY_HPP
-
-#include "code/nmethod.hpp"
-#include "compiler/compileBroker.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/vmOperations.hpp"
-#include "utilities/growableArray.hpp"
-
-// The CompilationPolicy selects which method (if any) should be compiled.
-// It also decides which methods must always be compiled (i.e., are never
-// interpreted).
-class CompileTask;
-class CompileQueue;
-class RFrame;
-
-class CompilationPolicy : public CHeapObj<mtCompiler> {
- static CompilationPolicy* _policy;
-
- // m must be compiled before executing it
- static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
-
-public:
- // If m must_be_compiled then request a compilation from the CompileBroker.
- // This supports the -Xcomp option.
- static void compile_if_required(const methodHandle& m, TRAPS);
-
- // m is allowed to be compiled
- static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
- // m is allowed to be osr compiled
- static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_all);
- static bool is_compilation_enabled();
- static void set_policy(CompilationPolicy* policy) { _policy = policy; }
- static CompilationPolicy* policy() { return _policy; }
-
- static CompileTask* select_task_helper(CompileQueue* compile_queue);
-
- // Return initial compile level that is used with Xcomp
- virtual CompLevel initial_compile_level() = 0;
- virtual int compiler_count(CompLevel comp_level) = 0;
- // main notification entry, return a pointer to an nmethod if the OSR is required,
- // returns NULL otherwise.
- virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) = 0;
- // safepoint() is called at the end of the safepoint
- virtual void do_safepoint_work() = 0;
- // reprofile request
- virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
- // delay_compilation(method) can be called by any component of the runtime to notify the policy
- // that it's recommended to delay the compilation of this method.
- virtual void delay_compilation(Method* method) = 0;
- // disable_compilation() is called whenever the runtime decides to disable compilation of the
- // specified method.
- virtual void disable_compilation(Method* method) = 0;
- // Select task is called by CompileBroker. The queue is guaranteed to have at least one
- // element and is locked. The function should select one and return it.
- virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
- // Tell the runtime if we think a given method is adequately profiled.
- virtual bool is_mature(Method* method) = 0;
- // Do policy initialization
- virtual void initialize() = 0;
- virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
-};
-
-// A base class for baseline policies.
-class NonTieredCompPolicy : public CompilationPolicy {
- int _compiler_count;
-protected:
- static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci);
- static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci);
- static void trace_osr_completion(nmethod* osr_nm);
- void reset_counter_for_invocation_event(const methodHandle& method);
- void reset_counter_for_back_branch_event(const methodHandle& method);
-public:
- NonTieredCompPolicy() : _compiler_count(0) { }
- virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
- virtual int compiler_count(CompLevel comp_level);
- virtual void do_safepoint_work();
- virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
- virtual void delay_compilation(Method* method);
- virtual void disable_compilation(Method* method);
- virtual bool is_mature(Method* method);
- virtual void initialize();
- virtual CompileTask* select_task(CompileQueue* compile_queue);
- virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
- virtual void method_invocation_event(const methodHandle& m, JavaThread* thread) = 0;
- virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) = 0;
-};
-
-class SimpleCompPolicy : public NonTieredCompPolicy {
- public:
- virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
- virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
-};
-
-// StackWalkCompPolicy - existing C2 policy
-
-#ifdef COMPILER2
-class StackWalkCompPolicy : public NonTieredCompPolicy {
- public:
- virtual void method_invocation_event(const methodHandle& m, JavaThread* thread);
- virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
-
- private:
- RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
- RFrame* senderOf(RFrame* rf, GrowableArray<RFrame*>* stack);
-
- // the following variables hold values computed by the last inlining decision
- // they are used for performance debugging only (print better messages)
- static const char* _msg; // reason for not inlining
-
- static const char* shouldInline (const methodHandle& callee, float frequency, int cnt);
- // positive filter: should send be inlined? returns NULL (--> yes) or rejection msg
- static const char* shouldNotInline(const methodHandle& callee);
- // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
-
-};
-#endif
-
-#endif // SHARE_RUNTIME_COMPILATIONPOLICY_HPP
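
The header deleted above declared the pattern the old policies relied on: one CompilationPolicy instance installed at startup, reached through a static accessor, with concrete behaviour chosen by virtual dispatch. A minimal standalone sketch of that pattern, with illustrative names rather than HotSpot types:

#include <cstdio>

class Policy {
  static Policy* _policy;           // the installed singleton
 public:
  virtual ~Policy() {}
  virtual void counter_overflow(const char* method) = 0;   // stand-in for event()
  static void set_policy(Policy* p) { _policy = p; }
  static Policy* policy()           { return _policy; }
};
Policy* Policy::_policy = nullptr;

class SimplePolicy : public Policy {
 public:
  void counter_overflow(const char* method) override {
    std::printf("compile %s at the highest tier\n", method);
  }
};

int main() {
  static SimplePolicy simple;
  Policy::set_policy(&simple);                       // chosen once at startup
  Policy::policy()->counter_overflow("Foo::bar()");  // callers see only the interface
  return 0;
}
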
--- a/src/hotspot/share/runtime/deoptimization.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/deoptimization.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -31,6 +31,7 @@
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
@@ -48,7 +49,6 @@
#include "oops/verifyOopClosure.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
--- a/src/hotspot/share/runtime/globals.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/globals.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -385,7 +385,7 @@
notproduct(ccstrlist, DeoptimizeOnlyAt, "", \
"A comma separated list of bcis to deoptimize at") \
\
- product(bool, DeoptimizeRandom, false, \
+ develop(bool, DeoptimizeRandom, false, \
"Deoptimize random frames on random exit from the runtime system")\
\
notproduct(bool, ZombieALot, false, \
@@ -1017,10 +1017,6 @@
"Inject thread creation failures for " \
"UseDynamicNumberOfCompilerThreads") \
\
- product(intx, CompilationPolicyChoice, 0, \
- "which compilation policy (0-2)") \
- range(0, 2) \
- \
develop(bool, UseStackBanging, true, \
"use stack banging for stack overflow checks (required for " \
"proper StackOverflow handling; disable only to measure cost " \
@@ -1051,6 +1047,9 @@
diagnostic(bool, EnableThreadSMRStatistics, trueInDebug, \
"Enable Thread SMR Statistics") \
\
+ product(bool, UseNotificationThread, true, \
+ "Use Notification Thread") \
+ \
product(bool, Inline, true, \
"Enable inlining") \
\
@@ -2143,14 +2142,6 @@
"% of CompileThreshold) before profiling in the interpreter") \
range(0, 100) \
\
- develop(intx, MaxRecompilationSearchLength, 10, \
- "The maximum number of frames to inspect when searching for " \
- "recompilee") \
- \
- develop(intx, MaxInterpretedSearchLength, 3, \
- "The maximum number of interpreted frames to skip when searching "\
- "for recompilee") \
- \
develop(intx, DesiredMethodLimit, 8000, \
"The desired maximum method size (in bytecodes) after inlining") \
\
--- a/src/hotspot/share/runtime/interfaceSupport.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -55,12 +55,6 @@
if (WalkStackALot) {
InterfaceSupport::walk_stack();
}
-#ifdef COMPILER2
- // This option is not used by Compiler 1
- if (StressDerivedPointers) {
- InterfaceSupport::stress_derived_pointers();
- }
-#endif
if (DeoptimizeALot || DeoptimizeRandom) {
InterfaceSupport::deoptimizeAll();
}
@@ -234,31 +228,6 @@
}
-void InterfaceSupport::stress_derived_pointers() {
-#ifdef COMPILER2
- JavaThread *thread = JavaThread::current();
- if (!is_init_completed()) return;
- ResourceMark rm(thread);
- bool found = false;
- for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
- CodeBlob* cb = sfs.current()->cb();
- if (cb != NULL && cb->oop_maps() ) {
- // Find oopmap for current method
- const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
- assert(map != NULL, "no oopmap found for pc");
- found = map->has_derived_pointer();
- }
- }
- if (found) {
- // $$$ Not sure what to do here.
- /*
- Scavenge::invoke(0);
- */
- }
-#endif
-}
-
-
void InterfaceSupport::verify_stack() {
JavaThread* thread = JavaThread::current();
ResourceMark rm(thread);
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -62,7 +62,6 @@
static void zombieAll();
static void deoptimizeAll();
- static void stress_derived_pointers();
static void verify_stack();
static void verify_last_frame();
# endif
--- a/src/hotspot/share/runtime/java.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/java.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -56,7 +56,6 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/runtime/javaCalls.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/javaCalls.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/nmethod.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
@@ -33,7 +34,6 @@
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jniCheck.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
--- a/src/hotspot/share/runtime/mutex.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/mutex.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -70,22 +70,16 @@
}
#endif // ASSERT
-void Mutex::lock(Thread* self) {
- check_safepoint_state(self);
-
- assert(_owner != self, "invariant");
-
- Mutex* in_flight_mutex = NULL;
+void Mutex::lock_contended(Thread* self) {
+ Mutex *in_flight_mutex = NULL;
DEBUG_ONLY(int retry_cnt = 0;)
bool is_active_Java_thread = self->is_active_Java_thread();
- while (!_lock.try_lock()) {
- // The lock is contended
-
- #ifdef ASSERT
+ do {
+ #ifdef ASSERT
if (retry_cnt++ > 3) {
log_trace(vmmutex)("JavaThread " INTPTR_FORMAT " on %d attempt trying to acquire vmmutex %s", p2i(self), retry_cnt, _name);
}
- #endif // ASSERT
+ #endif // ASSERT
// Is it a JavaThread participating in the safepoint protocol.
if (is_active_Java_thread) {
@@ -102,6 +96,17 @@
_lock.lock();
break;
}
+ } while (!_lock.try_lock());
+}
+
+void Mutex::lock(Thread* self) {
+ check_safepoint_state(self);
+
+ assert(_owner != self, "invariant");
+
+ if (!_lock.try_lock()) {
+ // The lock is contended, use contended slow-path function to lock
+ lock_contended(self);
}
assert_owner(NULL);
@@ -109,7 +114,7 @@
}
void Mutex::lock() {
- this->lock(Thread::current());
+ lock(Thread::current());
}
// Lock without safepoint check - a degenerate variant of lock() for use by
@@ -282,6 +287,11 @@
assert(_safepoint_check_required != _safepoint_check_sometimes || is_sometimes_ok(name),
"Lock has _safepoint_check_sometimes %s", name);
+
+ assert(_rank > special || _allow_vm_block,
+ "Special locks or below should allow the vm to block");
+ assert(_rank > special || _safepoint_check_required == _safepoint_check_never,
+ "Special locks or below should never safepoint");
#endif
}
@@ -388,17 +398,13 @@
// NSV implied with locking allow_vm_block or !safepoint_check locks.
void Mutex::no_safepoint_verifier(Thread* thread, bool enable) {
- // Threads_lock is special, since the safepoint synchronization will not start before this is
- // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
- // since it is used to transfer control between JavaThreads and the VMThread
- // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
- if ((_allow_vm_block &&
- this != Threads_lock &&
- this != Compile_lock && // Temporary: should not be necessary when we get separate compilation
- this != tty_lock && // The tty_lock is released for the safepoint.
- this != VMOperationRequest_lock &&
- this != VMOperationQueue_lock) ||
- rank() == Mutex::special) {
+ // The tty_lock is special because it is released for the safepoint by
+ // the safepoint mechanism.
+ if (this == tty_lock) {
+ return;
+ }
+
+ if (_allow_vm_block) {
if (enable) {
thread->_no_safepoint_count++;
} else {
--- a/src/hotspot/share/runtime/mutex.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/mutex.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -152,6 +152,9 @@
bool is_locked() const { return _owner != NULL; }
bool try_lock(); // Like lock(), but unblocking. It returns false instead
+ private:
+ void lock_contended(Thread *thread); // contended slow-path
+ public:
void release_for_safepoint();
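
The mutex.cpp/mutex.hpp change above splits Mutex::lock() into an uncontended attempt plus a separate lock_contended() slow path, keeping the hot path short and moving the retry loop out of line. A minimal sketch of the same structure over std::atomic — illustrative only, not the HotSpot Mutex, which additionally handles safepoint checks and rank assertions:

#include <atomic>
#include <thread>

class SpinLock {
  std::atomic<bool> _locked{false};

  // Cold path: only entered after the fast path has already failed once.
  void lock_contended() {
    do {
      std::this_thread::yield();   // placeholder for the real blocking/backoff logic
    } while (!try_lock());
  }

 public:
  bool try_lock() {
    return !_locked.exchange(true, std::memory_order_acquire);
  }

  void lock() {
    if (!try_lock()) {     // uncontended fast path
      lock_contended();    // contended slow path, mirroring the do/while above
    }
  }

  void unlock() {
    _locked.store(false, std::memory_order_release);
  }
};
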
--- a/src/hotspot/share/runtime/mutexLocker.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/mutexLocker.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -66,7 +66,6 @@
Mutex* RetData_lock = NULL;
Monitor* VMOperationQueue_lock = NULL;
Monitor* VMOperationRequest_lock = NULL;
-Monitor* SerializePage_lock = NULL;
Monitor* Threads_lock = NULL;
Mutex* NonJavaThreadsList_lock = NULL;
Mutex* NonJavaThreadsListSync_lock = NULL;
@@ -116,8 +115,10 @@
Mutex* Management_lock = NULL;
Monitor* Service_lock = NULL;
+Monitor* Notification_lock = NULL;
Monitor* PeriodicTask_lock = NULL;
Monitor* RedefineClasses_lock = NULL;
+Mutex* Verify_lock = NULL;
#if INCLUDE_JFR
Mutex* JfrStacktrace_lock = NULL;
@@ -135,6 +136,7 @@
Mutex* MetaspaceExpand_lock = NULL;
Mutex* ClassLoaderDataGraph_lock = NULL;
Monitor* ThreadsSMRDelete_lock = NULL;
+Mutex* ThreadIdTableCreate_lock = NULL;
Mutex* SharedDecoder_lock = NULL;
Mutex* DCmdFactory_lock = NULL;
#if INCLUDE_NMT
@@ -190,147 +192,156 @@
#endif
#define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
- var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
+ var = new type(Mutex::pri, #var, vm_block, Mutex::safepoint_check_allowed); \
assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
_mutex_array[_num_mutex++] = var; \
}
// Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
void mutex_init() {
- def(tty_lock , PaddedMutex , tty, true, Monitor::_safepoint_check_never); // allow to lock in VM
+ def(tty_lock , PaddedMutex , tty, true, _safepoint_check_never); // allow to lock in VM
- def(CGC_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // coordinate between fore- and background GC
- def(STS_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
+ def(CGC_lock , PaddedMonitor, special, true, _safepoint_check_never); // coordinate between fore- and background GC
+ def(STS_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
- def(FullGCCount_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
+ def(FullGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
if (UseG1GC) {
- def(DirtyCardQ_CBL_mon , PaddedMonitor, access, true, Monitor::_safepoint_check_never);
- def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, Monitor::_safepoint_check_never);
+ def(DirtyCardQ_CBL_mon , PaddedMonitor, access, true, _safepoint_check_never);
+ def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, _safepoint_check_never);
- def(FreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
- def(OldSets_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
- def(RootRegionScan_lock , PaddedMonitor, leaf , true, Monitor::_safepoint_check_never);
+ def(FreeList_lock , PaddedMutex , leaf , true, _safepoint_check_never);
+ def(OldSets_lock , PaddedMutex , leaf , true, _safepoint_check_never);
+ def(RootRegionScan_lock , PaddedMonitor, leaf , true, _safepoint_check_never);
- def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
- def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
+ def(StringDedupQueue_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
+ def(StringDedupTable_lock , PaddedMutex , leaf, true, _safepoint_check_never);
- def(MarkStackFreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
- def(MarkStackChunkList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
+ def(MarkStackFreeList_lock , PaddedMutex , leaf , true, _safepoint_check_never);
+ def(MarkStackChunkList_lock , PaddedMutex , leaf , true, _safepoint_check_never);
- def(MonitoringSupport_lock , PaddedMutex , native , true, Monitor::_safepoint_check_never); // used for serviceability monitoring support
+ def(MonitoringSupport_lock , PaddedMutex , native , true, _safepoint_check_never); // used for serviceability monitoring support
}
if (UseShenandoahGC) {
- def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
- def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
+ def(StringDedupQueue_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
+ def(StringDedupTable_lock , PaddedMutex , leaf, true, _safepoint_check_never);
}
- def(ParGCRareEvent_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_always);
- def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_always);
- def(CodeCache_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never);
- def(RawMonitor_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
- def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for oop_map_cache allocation.
+ def(ParGCRareEvent_lock , PaddedMutex , leaf , true, _safepoint_check_always);
+ def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, _safepoint_check_always);
+ def(CodeCache_lock , PaddedMonitor, special, true, _safepoint_check_never);
+ def(RawMonitor_lock , PaddedMutex , special, true, _safepoint_check_never);
+ def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for oop_map_cache allocation.
+
+ def(MetaspaceExpand_lock , PaddedMutex , leaf-1, true, _safepoint_check_never);
+ def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, false, _safepoint_check_always);
- def(MetaspaceExpand_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
- def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, false, Monitor::_safepoint_check_always);
+ def(Patching_lock , PaddedMutex , special, true, _safepoint_check_never); // used for safepointing and code patching.
+ def(CompiledMethod_lock , PaddedMutex , special-1, true, _safepoint_check_never);
+ def(Service_lock , PaddedMonitor, special, true, _safepoint_check_never); // used for service thread operations
- def(Patching_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); // used for safepointing and code patching.
- def(CompiledMethod_lock , PaddedMutex , special-1, true, Monitor::_safepoint_check_never);
- def(Service_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // used for service thread operations
- def(JmethodIdCreation_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for creating jmethodIDs.
+ if (UseNotificationThread) {
+ def(Notification_lock , PaddedMonitor, special, true, _safepoint_check_never); // used for notification thread operations
+ } else {
+ Notification_lock = Service_lock;
+ }
- def(SystemDictionary_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always);
- def(ProtectionDomainSet_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
- def(SharedDictionary_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always);
- def(Module_lock , PaddedMutex , leaf+2, false, Monitor::_safepoint_check_always);
- def(InlineCacheBuffer_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
- def(VMStatistic_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
- def(ExpandHeap_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // Used during compilation by VM thread
- def(JNIHandleBlockFreeList_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never); // handles are used by VM thread
- def(SignatureHandlerLibrary_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
- def(SymbolArena_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_never);
- def(ProfilePrint_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // serial profile printing
- def(ExceptionCache_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // serial profile printing
- def(Debug1_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
+ def(JmethodIdCreation_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for creating jmethodIDs.
+
+ def(SystemDictionary_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
+ def(ProtectionDomainSet_lock , PaddedMutex , leaf-1, true, _safepoint_check_never);
+ def(SharedDictionary_lock , PaddedMutex , leaf, true, _safepoint_check_always);
+ def(Module_lock , PaddedMutex , leaf+2, false, _safepoint_check_always);
+ def(InlineCacheBuffer_lock , PaddedMutex , leaf, true, _safepoint_check_never);
+ def(VMStatistic_lock , PaddedMutex , leaf, false, _safepoint_check_always);
+ def(ExpandHeap_lock , PaddedMutex , leaf, true, _safepoint_check_always); // Used during compilation by VM thread
+ def(JNIHandleBlockFreeList_lock , PaddedMutex , leaf-1, true, _safepoint_check_never); // handles are used by VM thread
+ def(SignatureHandlerLibrary_lock , PaddedMutex , leaf, false, _safepoint_check_always);
+ def(SymbolArena_lock , PaddedMutex , leaf+2, true, _safepoint_check_never);
+ def(ProfilePrint_lock , PaddedMutex , leaf, false, _safepoint_check_always); // serial profile printing
+ def(ExceptionCache_lock , PaddedMutex , leaf, false, _safepoint_check_always); // serial profile printing
+ def(Debug1_lock , PaddedMutex , leaf, true, _safepoint_check_never);
#ifndef PRODUCT
- def(FullGCALot_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // a lock to make FullGCALot MT safe
+ def(FullGCALot_lock , PaddedMutex , leaf, false, _safepoint_check_always); // a lock to make FullGCALot MT safe
#endif
- def(BeforeExit_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always);
- def(PerfDataMemAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for allocating PerfData memory for performance data
- def(PerfDataManager_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for synchronized access to PerfDataManager resources
+ def(BeforeExit_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
+ def(PerfDataMemAlloc_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for allocating PerfData memory for performance data
+ def(PerfDataManager_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for synchronized access to PerfDataManager resources
// CMS_modUnionTable_lock leaf
// CMS_bitMap_lock leaf 1
// CMS_freeList_lock leaf 2
- def(Threads_lock , PaddedMonitor, barrier, true, Monitor::_safepoint_check_always); // Used for safepoint protocol.
- def(NonJavaThreadsList_lock , PaddedMutex, leaf, true, Monitor::_safepoint_check_never);
- def(NonJavaThreadsListSync_lock , PaddedMutex, leaf, true, Monitor::_safepoint_check_never);
+ def(Threads_lock , PaddedMonitor, barrier, true, _safepoint_check_always); // Used for safepoint protocol.
+ def(NonJavaThreadsList_lock , PaddedMutex, leaf, true, _safepoint_check_never);
+ def(NonJavaThreadsListSync_lock , PaddedMutex, leaf, true, _safepoint_check_never);
- def(VMOperationQueue_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_never); // VM_thread allowed to block on these
- def(VMOperationRequest_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always);
- def(RetData_lock , PaddedMutex , nonleaf, false, Monitor::_safepoint_check_always);
- def(Terminator_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always);
- def(InitCompleted_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
- def(VtableStubs_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_never);
- def(Notify_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always);
- def(JNICritical_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions
- def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);
+ def(VMOperationQueue_lock , PaddedMonitor, nonleaf, true, _safepoint_check_never); // VM_thread allowed to block on these
+ def(VMOperationRequest_lock , PaddedMonitor, nonleaf, true, _safepoint_check_always);
+ def(RetData_lock , PaddedMutex , nonleaf, false, _safepoint_check_always);
+ def(Terminator_lock , PaddedMonitor, nonleaf, true, _safepoint_check_always);
+ def(InitCompleted_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
+ def(VtableStubs_lock , PaddedMutex , nonleaf, true, _safepoint_check_never);
+ def(Notify_lock , PaddedMonitor, nonleaf, true, _safepoint_check_always);
+ def(JNICritical_lock , PaddedMonitor, nonleaf, true, _safepoint_check_always); // used for JNI critical regions
+ def(AdapterHandlerLibrary_lock , PaddedMutex , nonleaf, true, _safepoint_check_always);
- def(Heap_lock , PaddedMonitor, nonleaf+1, false, Monitor::_safepoint_check_sometimes); // Doesn't safepoint check during termination.
- def(JfieldIdCreation_lock , PaddedMutex , nonleaf+1, true, Monitor::_safepoint_check_always); // jfieldID, Used in VM_Operation
+ def(Heap_lock , PaddedMonitor, nonleaf+1, false, _safepoint_check_sometimes); // Doesn't safepoint check during termination.
+ def(JfieldIdCreation_lock , PaddedMutex , nonleaf+1, true, _safepoint_check_always); // jfieldID, Used in VM_Operation
- def(CompiledIC_lock , PaddedMutex , nonleaf+2, false, Monitor::_safepoint_check_never); // locks VtableStubs_lock, InlineCacheBuffer_lock
- def(CompileTaskAlloc_lock , PaddedMutex , nonleaf+2, true, Monitor::_safepoint_check_always);
- def(CompileStatistics_lock , PaddedMutex , nonleaf+2, false, Monitor::_safepoint_check_always);
- def(DirectivesStack_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
- def(MultiArray_lock , PaddedMutex , nonleaf+2, false, Monitor::_safepoint_check_always);
+ def(CompiledIC_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_never); // locks VtableStubs_lock, InlineCacheBuffer_lock
+ def(CompileTaskAlloc_lock , PaddedMutex , nonleaf+2, true, _safepoint_check_always);
+ def(CompileStatistics_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always);
+ def(DirectivesStack_lock , PaddedMutex , special, true, _safepoint_check_never);
+ def(MultiArray_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always);
- def(JvmtiThreadState_lock , PaddedMutex , nonleaf+2, false, Monitor::_safepoint_check_always); // Used by JvmtiThreadState/JvmtiEventController
- def(Management_lock , PaddedMutex , nonleaf+2, false, Monitor::_safepoint_check_always); // used for JVM management
+ def(JvmtiThreadState_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always); // Used by JvmtiThreadState/JvmtiEventController
+ def(Management_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always); // used for JVM management
- def(Compile_lock , PaddedMutex , nonleaf+3, true, Monitor::_safepoint_check_always);
- def(MethodData_lock , PaddedMutex , nonleaf+3, false, Monitor::_safepoint_check_always);
- def(TouchedMethodLog_lock , PaddedMutex , nonleaf+3, false, Monitor::_safepoint_check_always);
+ def(Compile_lock , PaddedMutex , nonleaf+3, false, _safepoint_check_always);
+ def(MethodData_lock , PaddedMutex , nonleaf+3, false, _safepoint_check_always);
+ def(TouchedMethodLog_lock , PaddedMutex , nonleaf+3, false, _safepoint_check_always);
- def(MethodCompileQueue_lock , PaddedMonitor, nonleaf+4, false, Monitor::_safepoint_check_always);
- def(Debug2_lock , PaddedMutex , nonleaf+4, true, Monitor::_safepoint_check_never);
- def(Debug3_lock , PaddedMutex , nonleaf+4, true, Monitor::_safepoint_check_never);
- def(CompileThread_lock , PaddedMonitor, nonleaf+5, false, Monitor::_safepoint_check_always);
- def(PeriodicTask_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_always);
- def(RedefineClasses_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_always);
+ def(MethodCompileQueue_lock , PaddedMonitor, nonleaf+4, false, _safepoint_check_always);
+ def(Debug2_lock , PaddedMutex , nonleaf+4, true, _safepoint_check_never);
+ def(Debug3_lock , PaddedMutex , nonleaf+4, true, _safepoint_check_never);
+ def(CompileThread_lock , PaddedMonitor, nonleaf+5, false, _safepoint_check_always);
+ def(PeriodicTask_lock , PaddedMonitor, nonleaf+5, true, _safepoint_check_always);
+ def(RedefineClasses_lock , PaddedMonitor, nonleaf+5, true, _safepoint_check_always);
+ def(Verify_lock , PaddedMutex, nonleaf+5, true, _safepoint_check_always);
if (WhiteBoxAPI) {
- def(Compilation_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_never);
+ def(Compilation_lock , PaddedMonitor, leaf, false, _safepoint_check_never);
}
#if INCLUDE_JFR
- def(JfrMsg_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always);
- def(JfrBuffer_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
- def(JfrStream_lock , PaddedMutex , leaf+1, true, Monitor::_safepoint_check_never); // ensure to rank lower than 'safepoint'
- def(JfrStacktrace_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
- def(JfrThreadSampler_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
+ def(JfrMsg_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
+ def(JfrBuffer_lock , PaddedMutex , leaf, true, _safepoint_check_never);
+ def(JfrStream_lock , PaddedMutex , leaf+1, true, _safepoint_check_never); // ensure to rank lower than 'safepoint'
+ def(JfrStacktrace_lock , PaddedMutex , special, true, _safepoint_check_never);
+ def(JfrThreadSampler_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
#endif
#ifndef SUPPORTS_NATIVE_CX8
- def(UnsafeJlong_lock , PaddedMutex , special, false, Monitor::_safepoint_check_never);
+ def(UnsafeJlong_lock , PaddedMutex , special, false, _safepoint_check_never);
#endif
- def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
- def(NMethodSweeperStats_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
- def(ThreadsSMRDelete_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never);
- def(SharedDecoder_lock , PaddedMutex , native, false, Monitor::_safepoint_check_never);
- def(DCmdFactory_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
+ def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, _safepoint_check_never);
+ def(NMethodSweeperStats_lock , PaddedMutex , special, true, _safepoint_check_never);
+ def(ThreadsSMRDelete_lock , PaddedMonitor, special, true, _safepoint_check_never);
+ def(ThreadIdTableCreate_lock , PaddedMutex , leaf, false, _safepoint_check_always);
+ def(SharedDecoder_lock , PaddedMutex , native, false, _safepoint_check_never);
+ def(DCmdFactory_lock , PaddedMutex , leaf, true, _safepoint_check_never);
#if INCLUDE_NMT
- def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
+ def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
#endif
#if INCLUDE_CDS
#if INCLUDE_JVMTI
- def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
+ def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
#endif
#if INCLUDE_JVMCI
- def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, Monitor::_safepoint_check_always);
+ def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, _safepoint_check_always);
#endif
- def(DumpTimeTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
+ def(DumpTimeTable_lock , PaddedMutex , leaf, true, _safepoint_check_never);
#endif // INCLUDE_CDS
}
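
One detail worth noting in the hunk above: when UseNotificationThread is off, Notification_lock is not created at all but simply aliased to Service_lock, so code that posts notifications takes the same lock either way. A small sketch of that aliasing, using hypothetical names and std::mutex in place of the VM Monitor:

#include <mutex>

static std::mutex Service_lock_;
static std::mutex Dedicated_notification_lock_;
static std::mutex* Notification_lock_ = nullptr;

void init_notification_lock(bool use_notification_thread) {
  // With the flag off, notification work shares the service thread's lock.
  Notification_lock_ = use_notification_thread ? &Dedicated_notification_lock_
                                               : &Service_lock_;
}
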
--- a/src/hotspot/share/runtime/mutexLocker.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/mutexLocker.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -103,7 +103,6 @@
extern Mutex* RawMonitor_lock;
extern Mutex* PerfDataMemAlloc_lock; // a lock on the allocator for PerfData memory for performance data
extern Mutex* PerfDataManager_lock; // a lock on access to PerfDataManager resources
-extern Mutex* ParkerFreeList_lock;
extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches
extern Mutex* FreeList_lock; // protects the free region list during safepoints
@@ -112,9 +111,12 @@
extern Mutex* Management_lock; // a lock used to serialize JVM management
extern Monitor* Service_lock; // a lock used for service thread operation
+extern Monitor* Notification_lock; // a lock used for notification thread operation
extern Monitor* PeriodicTask_lock; // protects the periodic task structure
extern Monitor* RedefineClasses_lock; // locks classes from parallel redefinition
+extern Mutex* Verify_lock; // synchronize initialization of verify library
extern Monitor* ThreadsSMRDelete_lock; // Used by ThreadsSMRSupport to take pressure off the Threads_lock
+extern Mutex* ThreadIdTableCreate_lock; // Used by ThreadIdTable to lazily create the thread id table
extern Mutex* SharedDecoder_lock; // serializes access to the decoder during normal (not error reporting) use
extern Mutex* DCmdFactory_lock; // serialize access to DCmdFactory information
#if INCLUDE_NMT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/notificationThread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/universe.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/notificationThread.hpp"
+#include "services/diagnosticArgument.hpp"
+#include "services/diagnosticFramework.hpp"
+#include "services/gcNotifier.hpp"
+#include "services/lowMemoryDetector.hpp"
+
+NotificationThread* NotificationThread::_instance = NULL;
+
+void NotificationThread::initialize() {
+ EXCEPTION_MARK;
+
+ const char* name = "Notification Thread";
+ Handle string = java_lang_String::create_from_str(name, CHECK);
+
+ // Initialize thread_oop to put it into the system threadGroup
+ Handle thread_group (THREAD, Universe::system_thread_group());
+ Handle thread_oop = JavaCalls::construct_new_instance(
+ SystemDictionary::Thread_klass(),
+ vmSymbols::threadgroup_string_void_signature(),
+ thread_group,
+ string,
+ CHECK);
+
+ Klass* group = SystemDictionary::ThreadGroup_klass();
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result,
+ thread_group,
+ group,
+ vmSymbols::add_method_name(),
+ vmSymbols::thread_void_signature(),
+ thread_oop,
+ THREAD);
+ {
+ MutexLocker mu(Threads_lock);
+ NotificationThread* thread = new NotificationThread(&notification_thread_entry);
+
+ // At this point it may be possible that no osthread was created for the
+ // JavaThread due to lack of memory. We would have to throw an exception
+ // in that case. However, since this must work and we do not allow
+ // exceptions anyway, check and abort if this fails.
+ if (thread == NULL || thread->osthread() == NULL) {
+ vm_exit_during_initialization("java.lang.OutOfMemoryError",
+ os::native_thread_creation_failed_msg());
+ }
+
+ java_lang_Thread::set_thread(thread_oop(), thread);
+ java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+ java_lang_Thread::set_daemon(thread_oop());
+ thread->set_threadObj(thread_oop());
+ _instance = thread;
+
+ Threads::add(thread);
+ Thread::start(thread);
+ }
+}
+
+
+
+void NotificationThread::notification_thread_entry(JavaThread* jt, TRAPS) {
+ while (true) {
+ bool sensors_changed = false;
+ bool has_dcmd_notification_event = false;
+ bool has_gc_notification_event = false;
+ {
+ // Need state transition ThreadBlockInVM so that this thread
+ // will be handled by safepoint correctly when this thread is
+ // notified at a safepoint.
+
+ ThreadBlockInVM tbivm(jt);
+
+ MonitorLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
+ // Process all available work on each (outer) iteration, rather than
+ // only the first recognized bit of work, to avoid frequently true early
+ // tests from potentially starving later work. Hence the use of
+ // arithmetic-or to combine results; we don't want short-circuiting.
+ while (((sensors_changed = LowMemoryDetector::has_pending_requests()) |
+ (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) |
+ (has_gc_notification_event = GCNotifier::has_event()))
+ == 0) {
+ // Wait as a suspend equivalent until notified that there is some work to do.
+ ml.wait(0, true);
+ }
+
+ }
+
+ if (sensors_changed) {
+ LowMemoryDetector::process_sensor_changes(jt);
+ }
+
+ if (has_gc_notification_event) {
+ GCNotifier::sendNotification(CHECK);
+ }
+
+ if (has_dcmd_notification_event) {
+ DCmdFactory::send_notification(CHECK);
+ }
+
+ }
+}
+
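
The wait loop in notification_thread_entry deliberately combines its three probes with arithmetic-or rather than logical-or, so every probe runs on every pass and an always-pending early source cannot starve the later ones. A tiny standalone illustration of the difference, using made-up probes rather than the real detectors:

#include <cstdio>

static bool probe_low_memory() { std::puts("low-memory probe ran"); return true;  }
static bool probe_dcmd_event() { std::puts("dcmd probe ran");       return false; }

int main() {
  bool mem = false, dcmd = false;

  // Bitwise | evaluates both probes even though the first one is already true...
  bool any_work = ((mem = probe_low_memory()) | (dcmd = probe_dcmd_event()));

  // ...whereas || would have short-circuited and never run probe_dcmd_event().
  std::printf("any work: %d (mem=%d, dcmd=%d)\n", any_work, mem, dcmd);
  return 0;
}
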
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/notificationThread.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP
+#define SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP
+
+#include "runtime/thread.hpp"
+
+// A JavaThread for low memory detection support, GC and
+// diagnostic framework notifications. This thread is not hidden
+// from the external view to allow the debugger to stop at the
+// breakpoints inside registered MXBean notification listeners.
+
+class NotificationThread : public JavaThread {
+ friend class VMStructs;
+ private:
+
+ static NotificationThread* _instance;
+
+ static void notification_thread_entry(JavaThread* thread, TRAPS);
+ NotificationThread(ThreadFunction entry_point) : JavaThread(entry_point) {};
+
+ public:
+ static void initialize();
+
+};
+
+#endif // SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP
--- a/src/hotspot/share/runtime/objectMonitor.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/objectMonitor.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -43,7 +43,6 @@
class ObjectWaiter : public StackObj {
public:
enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ };
- enum Sorted { PREPEND, APPEND, SORTED };
ObjectWaiter* volatile _next;
ObjectWaiter* volatile _prev;
Thread* _thread;
@@ -51,7 +50,6 @@
ParkEvent * _event;
volatile int _notified;
volatile TStates TState;
- Sorted _Sorted; // List placement disposition
bool _active; // Contention monitoring is enabled
public:
ObjectWaiter(Thread* thread);
@@ -68,10 +66,6 @@
// WARNING: This is a very sensitive and fragile class. DO NOT make any
// changes unless you are fully aware of the underlying semantics.
//
-// Class JvmtiRawMonitor currently inherits from ObjectMonitor so
-// changes in this class must be careful to not break JvmtiRawMonitor.
-// These two subsystems should be separated.
-//
// ObjectMonitor Layout Overview/Highlights/Restrictions:
//
// - The _header field must be at offset 0 because the displaced header
@@ -127,16 +121,6 @@
// in a 64-bit JVM.
class ObjectMonitor {
- public:
- enum {
- OM_OK, // no error
- OM_SYSTEM_ERROR, // operating system error
- OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
- OM_INTERRUPTED, // Thread.interrupt()
- OM_TIMED_OUT // Object.wait() timed out
- };
-
- private:
friend class ObjectSynchronizer;
friend class ObjectWaiter;
friend class VMStructs;
@@ -158,16 +142,13 @@
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
sizeof(volatile markWord) + sizeof(void* volatile) +
sizeof(ObjectMonitor *));
- protected: // protected for JvmtiRawMonitor
void* volatile _owner; // pointer to owning thread OR BasicLock
- private:
volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
- protected: // protected for JvmtiRawMonitor
volatile intptr_t _recursions; // recursion count, 0 for first entry
ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
// The list is actually composed of WaitNodes,
// acting as proxies for Threads.
- private:
+
ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry.
Thread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
Thread* volatile _Responsible;
--- a/src/hotspot/share/runtime/os.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/os.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -536,14 +536,6 @@
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
- // Try to load verify dll first. In 1.3 java dll depends on it and is not
- // always able to find it when the loading executable is outside the JDK.
- // In order to keep working with 1.2 we ignore any loading errors.
- if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
- "verify")) {
- dll_load(buffer, ebuf, sizeof(ebuf));
- }
-
// Load java dll
if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
"java")) {
--- a/src/hotspot/share/runtime/rframe.cpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "interpreter/interpreter.hpp"
-#include "oops/method.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/symbol.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/rframe.hpp"
-#include "runtime/vframe.hpp"
-#include "runtime/vframe_hp.hpp"
-
-
-static RFrame*const noCaller = (RFrame*) 0x1; // no caller (i.e., initial frame)
-static RFrame*const noCallerYet = (RFrame*) 0x0; // caller not yet computed
-
-RFrame::RFrame(frame fr, JavaThread* thread, RFrame*const callee) :
- _fr(fr), _thread(thread), _callee(callee), _num(callee ? callee->num() + 1 : 0) {
- _caller = (RFrame*)noCallerYet;
- _invocations = 0;
- _distance = 0;
-}
-
-void RFrame::set_distance(int d) {
- assert(is_compiled() || d >= 0, "should be positive");
- _distance = d;
-}
-
-InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const callee)
-: RFrame(fr, thread, callee) {
- RegisterMap map(thread, false);
- _vf = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
- _method = _vf->method();
- assert( _vf->is_interpreted_frame(), "must be interpreted");
- init();
-}
-
-InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, Method* m)
-: RFrame(fr, thread, NULL) {
- RegisterMap map(thread, false);
- _vf = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
- _method = m;
-
- assert( _vf->is_interpreted_frame(), "must be interpreted");
- init();
-}
-
-CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread, RFrame*const callee)
-: RFrame(fr, thread, callee) {
- init();
-}
-
-CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread)
-: RFrame(fr, thread, NULL) {
- init();
-}
-
-DeoptimizedRFrame::DeoptimizedRFrame(frame fr, JavaThread* thread, RFrame*const callee)
-: InterpretedRFrame(fr, thread, callee) {}
-
-RFrame* RFrame::new_RFrame(frame fr, JavaThread* thread, RFrame*const callee) {
- RFrame* rf = NULL;
- int dist = callee ? callee->distance() : -1;
- if (fr.is_interpreted_frame()) {
- rf = new InterpretedRFrame(fr, thread, callee);
- dist++;
- } else if (fr.is_compiled_frame()) {
- // Even deopted frames look compiled because the deopt
- // is invisible until it happens.
- rf = new CompiledRFrame(fr, thread, callee);
- } else {
- assert(false, "Unhandled frame type");
- }
- if (rf != NULL) {
- rf->set_distance(dist);
- rf->init();
- }
- return rf;
-}
-
-RFrame* RFrame::caller() {
- if (_caller != noCallerYet) return (_caller == noCaller) ? NULL : _caller; // already computed caller
-
- // caller not yet computed; do it now
- if (_fr.is_first_java_frame()) {
- _caller = (RFrame*)noCaller;
- return NULL;
- }
-
- RegisterMap map(_thread, false);
- frame sender = _fr.real_sender(&map);
- if (sender.is_java_frame()) {
- _caller = new_RFrame(sender, thread(), this);
- return _caller;
- }
-
- // Real caller is not java related
- _caller = (RFrame*)noCaller;
- return NULL;
-}
-
-int InterpretedRFrame::cost() const {
- return _method->code_size(); // fix this
- //return _method->estimated_inline_cost(_receiverKlass);
-}
-
-int CompiledRFrame::cost() const {
- CompiledMethod* nm = top_method()->code();
- if (nm != NULL) {
- return nm->insts_size();
- } else {
- return top_method()->code_size();
- }
-}
-
-void CompiledRFrame::init() {
- RegisterMap map(thread(), false);
- vframe* vf = vframe::new_vframe(&_fr, &map, thread());
- assert(vf->is_compiled_frame(), "must be compiled");
- _nm = compiledVFrame::cast(vf)->code()->as_nmethod();
- vf = vf->top();
- _vf = javaVFrame::cast(vf);
- _method = CodeCache::find_nmethod(_fr.pc())->method();
- assert(_method, "should have found a method");
-#ifndef PRODUCT
- _invocations = _method->compiled_invocation_count();
-#endif
-}
-
-void InterpretedRFrame::init() {
- _invocations = _method->invocation_count() + _method->backedge_count();
-}
-
-void RFrame::print(const char* kind) {
-#ifndef PRODUCT
-#if COMPILER2_OR_JVMCI
- int cnt = top_method()->interpreter_invocation_count();
-#else
- int cnt = top_method()->invocation_count();
-#endif
- tty->print("%3d %s ", _num, is_interpreted() ? "I" : "C");
- top_method()->print_short_name(tty);
- tty->print_cr(": inv=%5d(%d) cst=%4d", _invocations, cnt, cost());
-#endif
-}
-
-void CompiledRFrame::print() {
- RFrame::print("comp");
-}
-
-void InterpretedRFrame::print() {
- RFrame::print("int.");
-}
-
-void DeoptimizedRFrame::print() {
- RFrame::print("deopt.");
-}
--- a/src/hotspot/share/runtime/rframe.hpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_RUNTIME_RFRAME_HPP
-#define SHARE_RUNTIME_RFRAME_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/frame.hpp"
-
-// rframes ("recompiler frames") decorate stack frames with some extra information
-// needed by the recompiler. The recompiler views the stack (at the time of recompilation)
-// as a list of rframes.
-
-class RFrame : public ResourceObj {
- protected:
- const frame _fr; // my frame
- JavaThread* const _thread; // thread where frame resides.
- RFrame* _caller; // caller / callee rframes (or NULL)
- RFrame*const _callee;
- const int _num; // stack frame number (0 = most recent)
- int _invocations; // current invocation estimate (for this frame)
- // (i.e., how often was this frame called)
- int _distance; // recompilation search "distance" (measured in # of interpreted frames)
-
- RFrame(frame fr, JavaThread* thread, RFrame*const callee);
- virtual void init() = 0; // compute invocations, loopDepth, etc.
- void print(const char* name);
-
- public:
-
- static RFrame* new_RFrame(frame fr, JavaThread* thread, RFrame*const callee);
-
- virtual bool is_interpreted() const { return false; }
- virtual bool is_compiled() const { return false; }
- int distance() const { return _distance; }
- void set_distance(int d);
- int invocations() const { return _invocations; }
- int num() const { return _num; }
- frame fr() const { return _fr; }
- JavaThread* thread() const { return _thread; }
- virtual int cost() const = 0; // estimated inlining cost (size)
- virtual Method* top_method() const = 0;
- virtual javaVFrame* top_vframe() const = 0;
- virtual nmethod* nm() const { ShouldNotCallThis(); return NULL; }
-
- RFrame* caller();
- RFrame* callee() const { return _callee; }
- RFrame* parent() const; // rframe containing lexical scope (if any)
- virtual void print() = 0;
-
- static int computeSends(Method* m);
- static int computeSends(nmethod* nm);
- static int computeCumulSends(Method* m);
- static int computeCumulSends(nmethod* nm);
-};
-
-class CompiledRFrame : public RFrame { // frame containing a compiled method
- protected:
- nmethod* _nm;
- javaVFrame* _vf; // top vframe; may be NULL (for most recent frame)
- Method* _method; // top method
-
- CompiledRFrame(frame fr, JavaThread* thread, RFrame*const callee);
- void init();
- friend class RFrame;
-
- public:
- CompiledRFrame(frame fr, JavaThread* thread); // for nmethod triggering its counter (callee == NULL)
- bool is_compiled() const { return true; }
- Method* top_method() const { return _method; }
- javaVFrame* top_vframe() const { return _vf; }
- nmethod* nm() const { return _nm; }
- int cost() const;
- void print();
-};
-
-class InterpretedRFrame : public RFrame { // interpreter frame
- protected:
- javaVFrame* _vf; // may be NULL (for most recent frame)
- Method* _method;
-
- InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const callee);
- void init();
- friend class RFrame;
-
- public:
- InterpretedRFrame(frame fr, JavaThread* thread, Method* m); // constructor for method triggering its invocation counter
- bool is_interpreted() const { return true; }
- Method* top_method() const { return _method; }
- javaVFrame* top_vframe() const { return _vf; }
- int cost() const;
- void print();
-};
-
-// treat deoptimized frames as interpreted
-class DeoptimizedRFrame : public InterpretedRFrame {
- protected:
- DeoptimizedRFrame(frame fr, JavaThread* thread, RFrame*const callee);
- friend class RFrame;
- public:
- void print();
-};
-
-#endif // SHARE_RUNTIME_RFRAME_HPP
--- a/src/hotspot/share/runtime/safepoint.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/safepoint.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
+#include "compiler/compilationPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/oopStorage.hpp"
@@ -47,7 +48,6 @@
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/runtime/serviceThread.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/serviceThread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -43,6 +43,7 @@
#include "services/diagnosticFramework.hpp"
#include "services/gcNotifier.hpp"
#include "services/lowMemoryDetector.hpp"
+#include "services/threadIdTable.hpp"
ServiceThread* ServiceThread::_instance = NULL;
@@ -101,6 +102,7 @@
bool stringtable_work = false;
bool symboltable_work = false;
bool resolved_method_table_work = false;
+ bool thread_id_table_work = false;
bool protection_domain_table_work = false;
bool oopstorage_work = false;
JvmtiDeferredEvent jvmti_event;
@@ -120,13 +122,14 @@
// only the first recognized bit of work, to avoid frequently true early
// tests from potentially starving later work. Hence the use of
// arithmetic-or to combine results; we don't want short-circuiting.
- while (((sensors_changed = LowMemoryDetector::has_pending_requests()) |
+ while (((sensors_changed = (!UseNotificationThread && LowMemoryDetector::has_pending_requests())) |
(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) |
- (has_gc_notification_event = GCNotifier::has_event()) |
- (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) |
+ (has_gc_notification_event = (!UseNotificationThread && GCNotifier::has_event())) |
+ (has_dcmd_notification_event = (!UseNotificationThread && DCmdFactory::has_pending_jmx_notification())) |
(stringtable_work = StringTable::has_work()) |
(symboltable_work = SymbolTable::has_work()) |
(resolved_method_table_work = ResolvedMethodTable::has_work()) |
+ (thread_id_table_work = ThreadIdTable::has_work()) |
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
(oopstorage_work = OopStorage::has_cleanup_work_and_reset())
) == 0) {
@@ -151,22 +154,28 @@
jvmti_event.post();
}
- if (sensors_changed) {
- LowMemoryDetector::process_sensor_changes(jt);
- }
+ if (!UseNotificationThread) {
+ if (sensors_changed) {
+ LowMemoryDetector::process_sensor_changes(jt);
+ }
- if(has_gc_notification_event) {
- GCNotifier::sendNotification(CHECK);
- }
+ if(has_gc_notification_event) {
+ GCNotifier::sendNotification(CHECK);
+ }
- if(has_dcmd_notification_event) {
- DCmdFactory::send_notification(CHECK);
+ if(has_dcmd_notification_event) {
+ DCmdFactory::send_notification(CHECK);
+ }
}
if (resolved_method_table_work) {
ResolvedMethodTable::do_concurrent_work(jt);
}
+ if (thread_id_table_work) {
+ ThreadIdTable::do_concurrent_work(jt);
+ }
+
if (protection_domain_table_work) {
SystemDictionary::pd_cache_table()->unlink();
}
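
A minimal, self-contained sketch (not ServiceThread code; the has_work functions below are illustrative stubs) of why the loop above combines its checks with arithmetic-or rather than '||': every predicate must be re-evaluated and its flag assigned on each pass, even after an earlier check has already returned true, so later kinds of work are not starved.

    #include <cstdio>

    static bool string_table_has_work() { return false; }  // illustrative stubs
    static bool symbol_table_has_work() { return true; }
    static bool oop_storage_has_work()  { return true; }

    int main() {
      bool st, sy, oo;
      // Arithmetic-or: all three predicates run and all three flags are assigned.
      while (((st = string_table_has_work()) |
              (sy = symbol_table_has_work()) |
              (oo = oop_storage_has_work())) == 0) {
        // a real service thread would wait on a condition variable here
      }
      // With '||' the third call would be short-circuited once the second
      // returned true, and 'oo' would not even be assigned for that pass.
      std::printf("st=%d sy=%d oo=%d\n", st, sy, oo);
      return 0;
    }
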
--- a/src/hotspot/share/runtime/serviceThread.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/serviceThread.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -27,8 +27,10 @@
#include "runtime/thread.hpp"
-// A JavaThread for low memory detection support and JVMTI
-// compiled-method-load events.
+// A JavaThread, hidden from external view, that handles JVMTI
+// compiled-method-load events, oop storage cleanup, and maintenance of the
+// string, symbol, protection domain, and resolved method tables.
+
class ServiceThread : public JavaThread {
friend class VMStructs;
private:
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -57,7 +57,6 @@
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@@ -1269,6 +1268,7 @@
// will be supported.
if (!callee_method->is_old() &&
(callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
+ NoSafepointVerifier nsv;
#ifdef ASSERT
// We must not try to patch to jump to an already unloaded method.
if (dest_entry_point != 0) {
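
The hunk above adds a scoped NoSafepointVerifier around the call-site patching. As a rough illustration of that RAII pattern only, here is a self-contained toy version; the Toy* types are invented stand-ins and do not reflect HotSpot's actual implementation, which hooks into per-thread safepoint state in debug builds.

    #include <cassert>
    #include <cstdio>

    struct ToySafepointState {
      int no_safepoint_depth = 0;
      void reach_safepoint() {
        assert(no_safepoint_depth == 0 && "safepoint inside a forbidden scope");
      }
    };

    class ToyNoSafepointVerifier {
      ToySafepointState& _state;
     public:
      explicit ToyNoSafepointVerifier(ToySafepointState& s) : _state(s) { ++_state.no_safepoint_depth; }
      ~ToyNoSafepointVerifier() { --_state.no_safepoint_depth; }
    };

    static void patch_call_site(ToySafepointState& state) {
      ToyNoSafepointVerifier nsv(state);  // analogous to the 'nsv' added above
      // ... patch the call site; calling state.reach_safepoint() in this scope would assert
    }

    int main() {
      ToySafepointState state;
      patch_call_site(state);
      state.reach_safepoint();  // outside the guarded scope: allowed
      std::puts("ok");
      return 0;
    }
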
--- a/src/hotspot/share/runtime/sweeper.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/sweeper.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -38,7 +38,6 @@
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/runtime/thread.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/thread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -258,6 +258,7 @@
_current_pending_monitor = NULL;
_current_pending_monitor_is_from_java = true;
_current_waiting_monitor = NULL;
+ _current_pending_raw_monitor = NULL;
_num_nested_signal = 0;
om_free_list = NULL;
om_free_count = 0;
@@ -3847,7 +3848,7 @@
// Create the VMThread
{ TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime));
- VMThread::create();
+ VMThread::create();
Thread* vmthread = VMThread::vm_thread();
if (!os::create_thread(vmthread, os::vm_thread)) {
@@ -4185,7 +4186,7 @@
for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
// CDS dumping does not support native JVMTI agent.
// CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
- if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
+ if (Arguments::is_dumping_archive()) {
if(!agent->is_instrument_lib()) {
vm_exit_during_cds_dumping("CDS dumping does not support native JVMTI agent, name", agent->name());
} else if (!AllowArchivingWithJavaAgent) {
--- a/src/hotspot/share/runtime/thread.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/thread.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -62,6 +62,7 @@
class ThreadsList;
class ThreadsSMRSupport;
+class JvmtiRawMonitor;
class JvmtiThreadState;
class ThreadStatistics;
class ConcurrentLocksDump;
@@ -404,6 +405,9 @@
ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
// is waiting to lock
bool _current_pending_monitor_is_from_java; // locking is from Java code
+ JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
+ // is waiting to lock
+
// ObjectMonitor on which this thread called Object.wait()
ObjectMonitor* _current_waiting_monitor;
@@ -640,6 +644,14 @@
_current_waiting_monitor = monitor;
}
+ // For tracking the Jvmti raw monitor the thread is pending on.
+ JvmtiRawMonitor* current_pending_raw_monitor() {
+ return _current_pending_raw_monitor;
+ }
+ void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
+ _current_pending_raw_monitor = monitor;
+ }
+
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Used by JavaThread::oops_do.
@@ -786,7 +798,7 @@
public:
volatile intptr_t _Stalled;
volatile int _TypeTag;
- ParkEvent * _ParkEvent; // for synchronized()
+ ParkEvent * _ParkEvent; // for Object monitors and JVMTI raw monitors
ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
int NativeSyncRecursion; // diagnostic
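
A simplified, self-contained sketch of the bookkeeping pattern behind the new _current_pending_raw_monitor field: record what the thread is about to block on, block, then clear the record once the monitor is held, so diagnostics can report the pending raw monitor. The Toy* types below are illustrative stand-ins, not HotSpot's Thread or JvmtiRawMonitor.

    #include <mutex>

    struct ToyRawMonitor { std::mutex lock; };

    struct ToyThread {
      ToyRawMonitor* current_pending_raw_monitor = nullptr;
      void set_current_pending_raw_monitor(ToyRawMonitor* m) { current_pending_raw_monitor = m; }
    };

    void raw_monitor_enter(ToyThread* self, ToyRawMonitor* monitor) {
      self->set_current_pending_raw_monitor(monitor);  // visible to introspection while blocked
      monitor->lock.lock();                            // may block here
      self->set_current_pending_raw_monitor(nullptr);  // acquired: no longer pending
    }

    void raw_monitor_exit(ToyThread* self, ToyRawMonitor* monitor) {
      (void)self;
      monitor->lock.unlock();
    }
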
--- a/src/hotspot/share/runtime/threadSMR.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/threadSMR.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -26,9 +26,11 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
+#include "services/threadIdTable.hpp"
#include "services/threadService.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -129,7 +131,6 @@
// Impl note: See _to_delete_list_cnt note.
uint ThreadsSMRSupport::_to_delete_list_max = 0;
-
// 'inline' functions first so the definitions are before first use:
inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) {
@@ -608,16 +609,28 @@
}
JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
- for (uint i = 0; i < length(); i++) {
- JavaThread* thread = thread_at(i);
- oop tobj = thread->threadObj();
- // Ignore the thread if it hasn't run yet, has exited
- // or is starting to exit.
- if (tobj != NULL && !thread->is_exiting() &&
- java_tid == java_lang_Thread::thread_id(tobj)) {
- // found a match
- return thread;
+ ThreadIdTable::lazy_initialize(this);
+ JavaThread* thread = ThreadIdTable::find_thread_by_tid(java_tid);
+ if (thread == NULL) {
+ // If the thread is not found in the table find it
+ // with a linear search and add to the table.
+ for (uint i = 0; i < length(); i++) {
+ thread = thread_at(i);
+ oop tobj = thread->threadObj();
+ // Ignore the thread if it hasn't run yet, has exited
+ // or is starting to exit.
+ if (tobj != NULL && java_tid == java_lang_Thread::thread_id(tobj)) {
+ MutexLocker ml(Threads_lock);
+ // Must be inside the lock to ensure that we don't add a thread to the table
+ // that has just passed the removal point in ThreadsSMRSupport::remove_thread()
+ if (!thread->is_exiting()) {
+ ThreadIdTable::add_thread(java_tid, thread);
+ return thread;
+ }
+ }
}
+ } else if (!thread->is_exiting()) {
+ return thread;
}
return NULL;
}
@@ -742,6 +755,10 @@
ThreadsList *old_list = xchg_java_thread_list(new_list);
free_list(old_list);
+ if (ThreadIdTable::is_initialized()) {
+ jlong tid = SharedRuntime::get_java_tid(thread);
+ ThreadIdTable::add_thread(tid, thread);
+ }
}
// set_delete_notify() and clear_delete_notify() are called
@@ -909,6 +926,10 @@
}
void ThreadsSMRSupport::remove_thread(JavaThread *thread) {
+ if (ThreadIdTable::is_initialized()) {
+ jlong tid = SharedRuntime::get_java_tid(thread);
+ ThreadIdTable::remove_thread(tid);
+ }
ThreadsList *new_list = ThreadsList::remove_thread(ThreadsSMRSupport::get_java_thread_list(), thread);
if (EnableThreadSMRStatistics) {
ThreadsSMRSupport::inc_java_thread_list_alloc_cnt();
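
A self-contained sketch of the lookup pattern introduced in find_JavaThread_from_java_tid above: try the id table first, fall back to a linear scan, and only insert into the table under the lock so the insertion cannot race the removal path. The types, container, and lock below are illustrative stand-ins (std::unordered_map, std::mutex), not HotSpot's ThreadIdTable or Threads_lock.

    #include <cstdint>
    #include <mutex>
    #include <unordered_map>
    #include <vector>

    struct ToyThread { int64_t tid; bool exiting; };

    std::mutex threads_lock;                          // stands in for Threads_lock
    std::unordered_map<int64_t, ToyThread*> id_table; // stands in for ThreadIdTable

    ToyThread* find_by_tid(const std::vector<ToyThread*>& snapshot, int64_t tid) {
      if (auto it = id_table.find(tid); it != id_table.end()) {
        return it->second->exiting ? nullptr : it->second;  // fast path: table hit
      }
      for (ToyThread* t : snapshot) {                        // slow path: linear scan
        if (t->tid == tid) {
          std::lock_guard<std::mutex> guard(threads_lock);
          if (!t->exiting) {           // re-check under the lock, mirroring the comment above
            id_table.emplace(tid, t);
            return t;
          }
        }
      }
      return nullptr;
    }
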
--- a/src/hotspot/share/runtime/tieredThresholdPolicy.cpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1005 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "compiler/compileBroker.hpp"
-#include "compiler/compilerOracle.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/safepointVerifiers.hpp"
-#include "runtime/tieredThresholdPolicy.hpp"
-#include "code/scopeDesc.hpp"
-#include "oops/method.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-#ifdef TIERED
-
-#include "c1/c1_Compiler.hpp"
-#include "opto/c2compiler.hpp"
-
-template<CompLevel level>
-bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
- double threshold_scaling;
- if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
- scale *= threshold_scaling;
- }
- switch(level) {
- case CompLevel_aot:
- return (i >= Tier3AOTInvocationThreshold * scale) ||
- (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
- case CompLevel_none:
- case CompLevel_limited_profile:
- return (i >= Tier3InvocationThreshold * scale) ||
- (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
- case CompLevel_full_profile:
- return (i >= Tier4InvocationThreshold * scale) ||
- (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
- }
- return true;
-}
-
-template<CompLevel level>
-bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
- double threshold_scaling;
- if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
- scale *= threshold_scaling;
- }
- switch(level) {
- case CompLevel_aot:
- return b >= Tier3AOTBackEdgeThreshold * scale;
- case CompLevel_none:
- case CompLevel_limited_profile:
- return b >= Tier3BackEdgeThreshold * scale;
- case CompLevel_full_profile:
- return b >= Tier4BackEdgeThreshold * scale;
- }
- return true;
-}
-
-// Simple methods are as good being compiled with C1 as C2.
-// Determine if a given method is such a case.
-bool TieredThresholdPolicy::is_trivial(Method* method) {
- if (method->is_accessor() ||
- method->is_constant_getter()) {
- return true;
- }
- return false;
-}
-
-bool TieredThresholdPolicy::should_compile_at_level_simple(Method* method) {
- if (TieredThresholdPolicy::is_trivial(method)) {
- return true;
- }
-#if INCLUDE_JVMCI
- if (UseJVMCICompiler) {
- AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
- if (comp != NULL && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
- return true;
- }
- }
-#endif
- return false;
-}
-
-CompLevel TieredThresholdPolicy::comp_level(Method* method) {
- CompiledMethod *nm = method->code();
- if (nm != NULL && nm->is_in_use()) {
- return (CompLevel)nm->comp_level();
- }
- return CompLevel_none;
-}
-
-void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) {
- int invocation_count = mh->invocation_count();
- int backedge_count = mh->backedge_count();
- MethodData* mdh = mh->method_data();
- int mdo_invocations = 0, mdo_backedges = 0;
- int mdo_invocations_start = 0, mdo_backedges_start = 0;
- if (mdh != NULL) {
- mdo_invocations = mdh->invocation_count();
- mdo_backedges = mdh->backedge_count();
- mdo_invocations_start = mdh->invocation_count_start();
- mdo_backedges_start = mdh->backedge_count_start();
- }
- tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
- invocation_count, backedge_count, prefix,
- mdo_invocations, mdo_invocations_start,
- mdo_backedges, mdo_backedges_start);
- tty->print(" %smax levels=%d,%d", prefix,
- mh->highest_comp_level(), mh->highest_osr_comp_level());
-}
-
-// Print an event.
-void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh,
- int bci, CompLevel level) {
- bool inlinee_event = mh() != imh();
-
- ttyLocker tty_lock;
- tty->print("%lf: [", os::elapsedTime());
-
- switch(type) {
- case CALL:
- tty->print("call");
- break;
- case LOOP:
- tty->print("loop");
- break;
- case COMPILE:
- tty->print("compile");
- break;
- case REMOVE_FROM_QUEUE:
- tty->print("remove-from-queue");
- break;
- case UPDATE_IN_QUEUE:
- tty->print("update-in-queue");
- break;
- case REPROFILE:
- tty->print("reprofile");
- break;
- case MAKE_NOT_ENTRANT:
- tty->print("make-not-entrant");
- break;
- default:
- tty->print("unknown");
- }
-
- tty->print(" level=%d ", level);
-
- ResourceMark rm;
- char *method_name = mh->name_and_sig_as_C_string();
- tty->print("[%s", method_name);
- if (inlinee_event) {
- char *inlinee_name = imh->name_and_sig_as_C_string();
- tty->print(" [%s]] ", inlinee_name);
- }
- else tty->print("] ");
- tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
- CompileBroker::queue_size(CompLevel_full_optimization));
-
- print_specific(type, mh, imh, bci, level);
-
- if (type != COMPILE) {
- print_counters("", mh);
- if (inlinee_event) {
- print_counters("inlinee ", imh);
- }
- tty->print(" compilable=");
- bool need_comma = false;
- if (!mh->is_not_compilable(CompLevel_full_profile)) {
- tty->print("c1");
- need_comma = true;
- }
- if (!mh->is_not_osr_compilable(CompLevel_full_profile)) {
- if (need_comma) tty->print(",");
- tty->print("c1-osr");
- need_comma = true;
- }
- if (!mh->is_not_compilable(CompLevel_full_optimization)) {
- if (need_comma) tty->print(",");
- tty->print("c2");
- need_comma = true;
- }
- if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) {
- if (need_comma) tty->print(",");
- tty->print("c2-osr");
- }
- tty->print(" status=");
- if (mh->queued_for_compilation()) {
- tty->print("in-queue");
- } else tty->print("idle");
- }
- tty->print_cr("]");
-}
-
-void TieredThresholdPolicy::initialize() {
- int count = CICompilerCount;
- bool c1_only = TieredStopAtLevel < CompLevel_full_optimization;
-#ifdef _LP64
- // Turn on ergonomic compiler count selection
- if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
- FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
- }
- if (CICompilerCountPerCPU) {
- // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
- int log_cpu = log2_int(os::active_processor_count());
- int loglog_cpu = log2_int(MAX2(log_cpu, 1));
- count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
- // Make sure there is enough space in the code cache to hold all the compiler buffers
- size_t c1_size = Compiler::code_buffer_size();
- size_t c2_size = C2Compiler::initial_code_buffer_size();
- size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
- int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
- if (count > max_count) {
- // Lower the compiler count such that all buffers fit into the code cache
- count = MAX2(max_count, c1_only ? 1 : 2);
- }
- FLAG_SET_ERGO(CICompilerCount, count);
- }
-#else
- // On 32-bit systems, the number of compiler threads is limited to 3.
- // On these systems, the virtual address space available to the JVM
- // is usually limited to 2-4 GB (the exact value depends on the platform).
- // As the compilers (especially C2) can consume a large amount of
- // memory, scaling the number of compiler threads with the number of
- // available cores can result in the exhaustion of the address space
- // available to the VM and thus cause the VM to crash.
- if (FLAG_IS_DEFAULT(CICompilerCount)) {
- count = 3;
- FLAG_SET_ERGO(CICompilerCount, count);
- }
-#endif
-
- if (c1_only) {
- // No C2 compiler thread required
- set_c1_count(count);
- } else {
- set_c1_count(MAX2(count / 3, 1));
- set_c2_count(MAX2(count - c1_count(), 1));
- }
- assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
-
- // Some inlining tuning
-#ifdef X86
- if (FLAG_IS_DEFAULT(InlineSmallCode)) {
- FLAG_SET_DEFAULT(InlineSmallCode, 2000);
- }
-#endif
-
-#if defined SPARC || defined AARCH64
- if (FLAG_IS_DEFAULT(InlineSmallCode)) {
- FLAG_SET_DEFAULT(InlineSmallCode, 2500);
- }
-#endif
-
- set_increase_threshold_at_ratio();
- set_start_time(os::javaTimeMillis());
-}
-
-void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
- if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
- counter->set_carry_flag();
- }
-}
-
-// Set carry flags on the counters if necessary
-void TieredThresholdPolicy::handle_counter_overflow(Method* method) {
- MethodCounters *mcs = method->method_counters();
- if (mcs != NULL) {
- set_carry_if_necessary(mcs->invocation_counter());
- set_carry_if_necessary(mcs->backedge_counter());
- }
- MethodData* mdo = method->method_data();
- if (mdo != NULL) {
- set_carry_if_necessary(mdo->invocation_counter());
- set_carry_if_necessary(mdo->backedge_counter());
- }
-}
-
-// Called with the queue locked and with at least one element
-CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) {
- CompileTask *max_blocking_task = NULL;
- CompileTask *max_task = NULL;
- Method* max_method = NULL;
- jlong t = os::javaTimeMillis();
- // Iterate through the queue and find a method with a maximum rate.
- for (CompileTask* task = compile_queue->first(); task != NULL;) {
- CompileTask* next_task = task->next();
- Method* method = task->method();
- // If a method was unloaded or has been stale for some time, remove it from the queue.
- // Blocking tasks and tasks submitted from whitebox API don't become stale
- if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) {
- if (!task->is_unloaded()) {
- if (PrintTieredEvents) {
- print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
- }
- method->clear_queued_for_compilation();
- }
- compile_queue->remove_and_mark_stale(task);
- task = next_task;
- continue;
- }
- update_rate(t, method);
- if (max_task == NULL || compare_methods(method, max_method)) {
- // Select a method with the highest rate
- max_task = task;
- max_method = method;
- }
-
- if (task->is_blocking()) {
- if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
- max_blocking_task = task;
- }
- }
-
- task = next_task;
- }
-
- if (max_blocking_task != NULL) {
- // In blocking compilation mode, the CompileBroker will make
- // compilations submitted by a JVMCI compiler thread non-blocking. These
- // compilations should be scheduled after all blocking compilations
- // to service non-compiler related compilations sooner and reduce the
- // chance of such compilations timing out.
- max_task = max_blocking_task;
- max_method = max_task->method();
- }
-
- if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
- TieredStopAtLevel > CompLevel_full_profile &&
- max_method != NULL && is_method_profiled(max_method)) {
- max_task->set_comp_level(CompLevel_limited_profile);
-
- if (CompileBroker::compilation_is_complete(max_method, max_task->osr_bci(), CompLevel_limited_profile)) {
- if (PrintTieredEvents) {
- print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
- }
- compile_queue->remove_and_mark_stale(max_task);
- max_method->clear_queued_for_compilation();
- return NULL;
- }
-
- if (PrintTieredEvents) {
- print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
- }
- }
-
- return max_task;
-}
-
-void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
- for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
- if (PrintTieredEvents) {
- methodHandle mh(sd->method());
- print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
- }
- MethodData* mdo = sd->method()->method_data();
- if (mdo != NULL) {
- mdo->reset_start_counters();
- }
- if (sd->is_top()) break;
- }
-}
-
-nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
- int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
- if (comp_level == CompLevel_none &&
- JvmtiExport::can_post_interpreter_events() &&
- thread->is_interp_only_mode()) {
- return NULL;
- }
- if (ReplayCompiles) {
- // Don't trigger other compiles in testing mode
- return NULL;
- }
-
- handle_counter_overflow(method());
- if (method() != inlinee()) {
- handle_counter_overflow(inlinee());
- }
-
- if (PrintTieredEvents) {
- print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
- }
-
- if (bci == InvocationEntryBci) {
- method_invocation_event(method, inlinee, comp_level, nm, thread);
- } else {
- // method == inlinee if the event originated in the main method
- method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
- // Check if event led to a higher level OSR compilation
- CompLevel expected_comp_level = comp_level;
- if (inlinee->is_not_osr_compilable(expected_comp_level)) {
- // It's not possible to reach the expected level so fall back to simple.
- expected_comp_level = CompLevel_simple;
- }
- nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
- assert(osr_nm == NULL || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
- if (osr_nm != NULL) {
- // Perform OSR with new nmethod
- return osr_nm;
- }
- }
- return NULL;
-}
-
-// Check if the method can be compiled, change level if necessary
-void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
- assert(level <= TieredStopAtLevel, "Invalid compilation level");
- if (level == CompLevel_none) {
- return;
- }
- if (level == CompLevel_aot) {
- if (mh->has_aot_code()) {
- if (PrintTieredEvents) {
- print_event(COMPILE, mh, mh, bci, level);
- }
- MutexLocker ml(Compile_lock);
- NoSafepointVerifier nsv;
- if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
- mh->aot_code()->make_entrant();
- if (mh->has_compiled_code()) {
- mh->code()->make_not_entrant();
- }
- MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
- Method::set_code(mh, mh->aot_code());
- }
- }
- return;
- }
-
- // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
- // in the interpreter and then compile with C2 (the transition function will request that,
- // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with
- // pure C1.
- if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
- if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
- compile(mh, bci, CompLevel_simple, thread);
- }
- return;
- }
- if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
- if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
- nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
- if (osr_nm != NULL && osr_nm->comp_level() > CompLevel_simple) {
- // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
- osr_nm->make_not_entrant();
- }
- compile(mh, bci, CompLevel_simple, thread);
- }
- return;
- }
- if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
- return;
- }
- if (!CompileBroker::compilation_is_in_queue(mh)) {
- if (PrintTieredEvents) {
- print_event(COMPILE, mh, mh, bci, level);
- }
- submit_compile(mh, bci, level, thread);
- }
-}
-
-// Update the rate and submit compile
-void TieredThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
- int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
- update_rate(os::javaTimeMillis(), mh());
- CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
-}
-
-// Print an event.
-void TieredThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh,
- int bci, CompLevel level) {
- tty->print(" rate=");
- if (mh->prev_time() == 0) tty->print("n/a");
- else tty->print("%f", mh->rate());
-
- tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
- threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
-
-}
-
-// update_rate() is called from select_task() while holding a compile queue lock.
-void TieredThresholdPolicy::update_rate(jlong t, Method* m) {
- // Skip update if counters are absent.
- // Can't allocate them since we are holding compile queue lock.
- if (m->method_counters() == NULL) return;
-
- if (is_old(m)) {
- // We don't remove old methods from the queue,
- // so we can just zero the rate.
- m->set_rate(0);
- return;
- }
-
- // We don't update the rate if we've just came out of a safepoint.
- // delta_s is the time since last safepoint in milliseconds.
- jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
- jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
- // How many events were there since the last time?
- int event_count = m->invocation_count() + m->backedge_count();
- int delta_e = event_count - m->prev_event_count();
-
- // We should be running for at least 1ms.
- if (delta_s >= TieredRateUpdateMinTime) {
- // And we must've taken the previous point at least 1ms before.
- if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
- m->set_prev_time(t);
- m->set_prev_event_count(event_count);
- m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
- } else {
- if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
- // If nothing happened for 25ms, zero the rate. Don't modify prev values.
- m->set_rate(0);
- }
- }
- }
-}
-
-// Check if this method has been stale for a given number of milliseconds.
-// See select_task().
-bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
- jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
- jlong delta_t = t - m->prev_time();
- if (delta_t > timeout && delta_s > timeout) {
- int event_count = m->invocation_count() + m->backedge_count();
- int delta_e = event_count - m->prev_event_count();
- // Return true if there were no events.
- return delta_e == 0;
- }
- return false;
-}
-
-// We don't remove old methods from the compile queue even if they have
-// very low activity. See select_task().
-bool TieredThresholdPolicy::is_old(Method* method) {
- return method->invocation_count() > 50000 || method->backedge_count() > 500000;
-}
-
-double TieredThresholdPolicy::weight(Method* method) {
- return (double)(method->rate() + 1) *
- (method->invocation_count() + 1) * (method->backedge_count() + 1);
-}
-
-// Apply heuristics and return true if x should be compiled before y
-bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) {
- if (x->highest_comp_level() > y->highest_comp_level()) {
- // recompilation after deopt
- return true;
- } else
- if (x->highest_comp_level() == y->highest_comp_level()) {
- if (weight(x) > weight(y)) {
- return true;
- }
- }
- return false;
-}
-
-// Is method profiled enough?
-bool TieredThresholdPolicy::is_method_profiled(Method* method) {
- MethodData* mdo = method->method_data();
- if (mdo != NULL) {
- int i = mdo->invocation_count_delta();
- int b = mdo->backedge_count_delta();
- return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
- }
- return false;
-}
-
-double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
- double queue_size = CompileBroker::queue_size(level);
- int comp_count = compiler_count(level);
- double k = queue_size / (feedback_k * comp_count) + 1;
-
- // Increase C1 compile threshold when the code cache is filled more
- // than specified by IncreaseFirstTierCompileThresholdAt percentage.
- // The main intention is to keep enough free space for C2 compiled code
- // to achieve peak performance if the code cache is under stress.
- if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
- double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
- if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
- k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
- }
- }
- return k;
-}
-
-// Call and loop predicates determine whether a transition to a higher
-// compilation level should be performed (pointers to predicate functions
-// are passed to common()).
-// Tier?LoadFeedback is basically a coefficient that determines of
-// how many methods per compiler thread can be in the queue before
-// the threshold values double.
-bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
- switch(cur_level) {
- case CompLevel_aot: {
- double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
- }
- case CompLevel_none:
- case CompLevel_limited_profile: {
- double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return loop_predicate_helper<CompLevel_none>(i, b, k, method);
- }
- case CompLevel_full_profile: {
- double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
- return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
- }
- default:
- return true;
- }
-}
-
-bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
- switch(cur_level) {
- case CompLevel_aot: {
- double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return call_predicate_helper<CompLevel_aot>(i, b, k, method);
- }
- case CompLevel_none:
- case CompLevel_limited_profile: {
- double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return call_predicate_helper<CompLevel_none>(i, b, k, method);
- }
- case CompLevel_full_profile: {
- double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
- return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
- }
- default:
- return true;
- }
-}
-
-// Determine is a method is mature.
-bool TieredThresholdPolicy::is_mature(Method* method) {
- if (should_compile_at_level_simple(method)) return true;
- MethodData* mdo = method->method_data();
- if (mdo != NULL) {
- int i = mdo->invocation_count();
- int b = mdo->backedge_count();
- double k = ProfileMaturityPercentage / 100.0;
- return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
- loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
- }
- return false;
-}
-
-// If a method is old enough and is still in the interpreter we would want to
-// start profiling without waiting for the compiled method to arrive.
-// We also take the load on compilers into the account.
-bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
- if (cur_level == CompLevel_none &&
- CompileBroker::queue_size(CompLevel_full_optimization) <=
- Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
- int i = method->invocation_count();
- int b = method->backedge_count();
- double k = Tier0ProfilingStartPercentage / 100.0;
- return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
- }
- return false;
-}
-
-// Inlining control: if we're compiling a profiled method with C1 and the callee
-// is known to have OSRed in a C2 version, don't inline it.
-bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
- CompLevel comp_level = (CompLevel)env->comp_level();
- if (comp_level == CompLevel_full_profile ||
- comp_level == CompLevel_limited_profile) {
- return callee->highest_osr_comp_level() == CompLevel_full_optimization;
- }
- return false;
-}
-
-// Create MDO if necessary.
-void TieredThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
- if (mh->is_native() ||
- mh->is_abstract() ||
- mh->is_accessor() ||
- mh->is_constant_getter()) {
- return;
- }
- if (mh->method_data() == NULL) {
- Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
- }
-}
-
-
-/*
- * Method states:
- * 0 - interpreter (CompLevel_none)
- * 1 - pure C1 (CompLevel_simple)
- * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
- * 3 - C1 with full profiling (CompLevel_full_profile)
- * 4 - C2 (CompLevel_full_optimization)
- *
- * Common state transition patterns:
- * a. 0 -> 3 -> 4.
- * The most common path. But note that even in this straightforward case
- * profiling can start at level 0 and finish at level 3.
- *
- * b. 0 -> 2 -> 3 -> 4.
- * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
- * into state 3 directly and over-profiling while a method is in the C2 queue we transition to
- * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
- *
- * c. 0 -> (3->2) -> 4.
- * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
- * to enable the profiling to fully occur at level 0. In this case we change the compilation level
- * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
- * without full profiling while c2 is compiling.
- *
- * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
- * After a method was once compiled with C1 it can be identified as trivial and be compiled to
- * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1.
- *
- * e. 0 -> 4.
- * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
- * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
- * the compiled version already exists).
- *
- * Note that since state 0 can be reached from any other state via deoptimization different loops
- * are possible.
- *
- */
-
-// Common transition function. Given a predicate determines if a method should transition to another level.
-CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
- CompLevel next_level = cur_level;
- int i = method->invocation_count();
- int b = method->backedge_count();
-
- if (should_compile_at_level_simple(method)) {
- next_level = CompLevel_simple;
- } else {
- switch(cur_level) {
- default: break;
- case CompLevel_aot: {
- // If we were at full profile level, would we switch to full opt?
- if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
- next_level = CompLevel_full_optimization;
- } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
- Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
- (this->*p)(i, b, cur_level, method))) {
- next_level = CompLevel_full_profile;
- }
- }
- break;
- case CompLevel_none:
- // If we were at full profile level, would we switch to full opt?
- if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
- next_level = CompLevel_full_optimization;
- } else if ((this->*p)(i, b, cur_level, method)) {
-#if INCLUDE_JVMCI
- if (EnableJVMCI && UseJVMCICompiler) {
- // Since JVMCI takes a while to warm up, its queue inevitably backs up during
- // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
- // compilation method and all potential inlinees have mature profiles (which
- // includes type profiling). If it sees immature profiles, JVMCI's inliner
- // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to
- // exploring/inlining too many graphs). Since a rewrite of the inliner is
- // in progress, we simply disable the dialing back heuristic for now and will
- // revisit this decision once the new inliner is completed.
- next_level = CompLevel_full_profile;
- } else
-#endif
- {
- // C1-generated fully profiled code is about 30% slower than the limited profile
- // code that has only invocation and backedge counters. The observation is that
- // if C2 queue is large enough we can spend too much time in the fully profiled code
- // while waiting for C2 to pick the method from the queue. To alleviate this problem
- // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
- // we choose to compile a limited profiled version and then recompile with full profiling
- // when the load on C2 goes down.
- if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
- Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
- next_level = CompLevel_limited_profile;
- } else {
- next_level = CompLevel_full_profile;
- }
- }
- }
- break;
- case CompLevel_limited_profile:
- if (is_method_profiled(method)) {
- // Special case: we got here because this method was fully profiled in the interpreter.
- next_level = CompLevel_full_optimization;
- } else {
- MethodData* mdo = method->method_data();
- if (mdo != NULL) {
- if (mdo->would_profile()) {
- if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
- Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
- (this->*p)(i, b, cur_level, method))) {
- next_level = CompLevel_full_profile;
- }
- } else {
- next_level = CompLevel_full_optimization;
- }
- } else {
- // If there is no MDO we need to profile
- if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
- Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
- (this->*p)(i, b, cur_level, method))) {
- next_level = CompLevel_full_profile;
- }
- }
- }
- break;
- case CompLevel_full_profile:
- {
- MethodData* mdo = method->method_data();
- if (mdo != NULL) {
- if (mdo->would_profile()) {
- int mdo_i = mdo->invocation_count_delta();
- int mdo_b = mdo->backedge_count_delta();
- if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
- next_level = CompLevel_full_optimization;
- }
- } else {
- next_level = CompLevel_full_optimization;
- }
- }
- }
- break;
- }
- }
- return MIN2(next_level, (CompLevel)TieredStopAtLevel);
-}
-
-// Determine if a method should be compiled with a normal entry point at a different level.
-CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) {
- CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
- common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true));
- CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level);
-
- // If OSR method level is greater than the regular method level, the levels should be
- // equalized by raising the regular method level in order to avoid OSRs during each
- // invocation of the method.
- if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
- MethodData* mdo = method->method_data();
- guarantee(mdo != NULL, "MDO should not be NULL");
- if (mdo->invocation_count() >= 1) {
- next_level = CompLevel_full_optimization;
- }
- } else {
- next_level = MAX2(osr_level, next_level);
- }
- return next_level;
-}
-
-// Determine if we should do an OSR compilation of a given method.
-CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) {
- CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true);
- if (cur_level == CompLevel_none) {
- // If there is a live OSR method that means that we deopted to the interpreter
- // for the transition.
- CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
- if (osr_level > CompLevel_none) {
- return osr_level;
- }
- }
- return next_level;
-}
-
-bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) {
- if (UseAOT) {
- if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
- // If the current level is full profile or interpreter and we're switching to any other level,
- // activate the AOT code back first so that we won't waste time overprofiling.
- compile(mh, InvocationEntryBci, CompLevel_aot, thread);
- // Fall through for JIT compilation.
- }
- if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
- // If the next level is limited profile, use the aot code (if there is any),
- // since it's essentially the same thing.
- compile(mh, InvocationEntryBci, CompLevel_aot, thread);
- // Not need to JIT, we're done.
- return true;
- }
- }
- return false;
-}
-
-
-// Handle the invocation event.
-void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
- CompLevel level, CompiledMethod* nm, JavaThread* thread) {
- if (should_create_mdo(mh(), level)) {
- create_mdo(mh, thread);
- }
- CompLevel next_level = call_event(mh(), level, thread);
- if (next_level != level) {
- if (maybe_switch_to_aot(mh, level, next_level, thread)) {
- // No JITting necessary
- return;
- }
- if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
- compile(mh, InvocationEntryBci, next_level, thread);
- }
- }
-}
-
-// Handle the back branch event. Notice that we can compile the method
-// with a regular entry from here.
-void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
- int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
- if (should_create_mdo(mh(), level)) {
- create_mdo(mh, thread);
- }
- // Check if MDO should be created for the inlined method
- if (should_create_mdo(imh(), level)) {
- create_mdo(imh, thread);
- }
-
- if (is_compilation_enabled()) {
- CompLevel next_osr_level = loop_event(imh(), level, thread);
- CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
- // At the very least compile the OSR version
- if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
- compile(imh, bci, next_osr_level, thread);
- }
-
- // Use loop event as an opportunity to also check if there's been
- // enough calls.
- CompLevel cur_level, next_level;
- if (mh() != imh()) { // If there is an enclosing method
- if (level == CompLevel_aot) {
- // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
- if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
- compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread);
- }
- } else {
- // Current loop event level is not AOT
- guarantee(nm != NULL, "Should have nmethod here");
- cur_level = comp_level(mh());
- next_level = call_event(mh(), cur_level, thread);
-
- if (max_osr_level == CompLevel_full_optimization) {
- // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
- bool make_not_entrant = false;
- if (nm->is_osr_method()) {
- // This is an osr method, just make it not entrant and recompile later if needed
- make_not_entrant = true;
- } else {
- if (next_level != CompLevel_full_optimization) {
- // next_level is not full opt, so we need to recompile the
- // enclosing method without the inlinee
- cur_level = CompLevel_none;
- make_not_entrant = true;
- }
- }
- if (make_not_entrant) {
- if (PrintTieredEvents) {
- int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
- print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
- }
- nm->make_not_entrant();
- }
- }
- // Fix up next_level if necessary to avoid deopts
- if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
- next_level = CompLevel_full_profile;
- }
- if (cur_level != next_level) {
- if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
- compile(mh, InvocationEntryBci, next_level, thread);
- }
- }
- }
- } else {
- cur_level = comp_level(mh());
- next_level = call_event(mh(), cur_level, thread);
- if (next_level != cur_level) {
- if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
- compile(mh, InvocationEntryBci, next_level, thread);
- }
- }
- }
- }
-}
-
-#endif
--- a/src/hotspot/share/runtime/tieredThresholdPolicy.hpp Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,279 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP
-#define SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP
-
-#include "code/nmethod.hpp"
-#include "oops/methodData.hpp"
-#include "runtime/compilationPolicy.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-#ifdef TIERED
-
-class CompileTask;
-class CompileQueue;
-/*
- * The system supports 5 execution levels:
- * * level 0 - interpreter
- * * level 1 - C1 with full optimization (no profiling)
- * * level 2 - C1 with invocation and backedge counters
- * * level 3 - C1 with full profiling (level 2 + MDO)
- * * level 4 - C2
- *
- * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters
- * (invocation counters and backedge counters). The frequency of these notifications is
- * different at each level. These notifications are used by the policy to decide what transition
- * to make.
- *
- * Execution starts at level 0 (interpreter), then the policy can decide either to compile the
- * method at level 3 or level 2. The decision is based on the following factors:
- * 1. The length of the C2 queue determines the next level. The observation is that level 2
- * is generally faster than level 3 by about 30%, therefore we would want to minimize the time
- * a method spends at level 3. We should only spend the time at level 3 that is necessary to get
- * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to
- * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile
- * request makes its way through the long queue. When the load on C2 recedes we are going to
- * recompile at level 3 and start gathering profiling information.
- * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce
- * additional filtering if the compiler is overloaded. The rationale is that by the time a
- * method gets compiled it can become unused, so it doesn't make sense to put too much onto the
- * queue.
- *
- * After profiling is completed at level 3 the transition is made to level 4. Again, the length
- * of the C2 queue is used as a feedback to adjust the thresholds.
- *
- * After the first C1 compile some basic information is determined about the code like the number
- * of the blocks and the number of the loops. Based on that it can be decided that a method
- * is trivial and compiling it with C1 will yield the same code. In this case the method is
- * compiled at level 1 instead of 4.
- *
- * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of
- * the code and the C2 queue is sufficiently small we can decide to start profiling in the
- * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
- * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2
- * version is compiled instead in order to run faster while waiting for a level 4 version.
- *
- * Compile queues are implemented as priority queues - for each method in the queue we compute
- * the event rate (the number of invocation and backedge counter increments per unit of time).
- * When getting an element off the queue we pick the one with the largest rate. Maintaining the
- * rate also allows us to remove stale methods (the ones that got on the queue but stopped
- * being used shortly after that).
-*/
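The rate-based queue prioritization described in the comment above can be illustrated with a small, self-contained Java sketch (hypothetical names such as QueuedMethod and selectTask; this is not HotSpot code): each queued method tracks its invocation-plus-backedge events, and selection picks the entry with the highest events-per-millisecond rate, skipping entries whose rate has dropped to zero.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch of rate-based compile-queue selection (not HotSpot code).
    class QueuedMethod {
        final String name;
        long events;            // invocation + backedge counter increments
        long lastEvents;
        long lastSampleMs;
        double rate;            // events per millisecond

        QueuedMethod(String name, long nowMs) {
            this.name = name;
            this.lastSampleMs = nowMs;
        }

        void updateRate(long nowMs) {
            long dt = nowMs - lastSampleMs;
            if (dt > 0) {
                rate = (double) (events - lastEvents) / dt;
                lastEvents = events;
                lastSampleMs = nowMs;
            }
        }
    }

    public class CompileQueueSketch {
        // Pick the queued method with the largest event rate; stale entries (rate == 0) are skipped.
        static QueuedMethod selectTask(List<QueuedMethod> queue, long nowMs) {
            QueuedMethod best = null;
            for (QueuedMethod m : queue) {
                m.updateRate(nowMs);
                if (m.rate > 0 && (best == null || m.rate > best.rate)) {
                    best = m;
                }
            }
            return best;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            List<QueuedMethod> queue = new ArrayList<>();
            QueuedMethod hot = new QueuedMethod("hot", now - 100);
            hot.events = 50_000;
            QueuedMethod cold = new QueuedMethod("cold", now - 100);
            cold.events = 10;
            queue.add(cold);
            queue.add(hot);
            System.out.println("selected: " + selectTask(queue, now).name); // selected: hot
        }
    }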
-
-/* Command line options:
- * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method
- * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
- * makes a call into the runtime.
- *
- * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
- * compilation thresholds.
- * Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
- * Other thresholds work as follows:
- *
- * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when
- * the following predicate is true (X is the level):
- *
- * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s),
- *
- * where $i$ is the number of method invocations, $b$ number of backedges and $s$ is the scaling
- * coefficient that will be discussed further.
- * The intuition is to equalize the time that is spent profiling each method.
- * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
- * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
- * from Method* and for 3->4 transition they come from MDO (since profiled invocations are
- * counted separately). Finally, if a method does not contain anything worth profiling, a transition
- * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
- * what is specified by Tier4InvocationThreshold).
- *
- * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
- *
- * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending
- * on the compiler load. The scaling coefficients are computed as follows:
- *
- * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1,
- *
- * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X
- * is the number of level X compiler threads.
- *
- * Basically these parameters describe how many methods should be in the compile queue
- * per compiler thread before the scaling coefficient increases by one.
- *
- * This feedback provides the mechanism to automatically control the flow of compilation requests
- * depending on the machine speed, mutator load and other external factors.
- *
- * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop.
- * Consider the following observation: a method compiled with full profiling (level 3)
- * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO).
- * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue
- * gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues
- * executing at level 3 for a much longer time than is required by the predicate, and at suboptimal speed.
- * The idea is to dynamically change the behavior of the system in such a way that if a substantial
- * load on C2 is detected we first do the 0->2 transition, allowing the method to run faster, and then,
- * when the load decreases, allow 2->3 transitions.
- *
- * Tier3Delay* parameters control this switching mechanism.
- * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy
- * no longer does 0->3 transitions but does 0->2 transitions instead.
- * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue
- * per compiler thread falls below the specified amount.
- * The hysteresis is necessary to avoid jitter.
- *
- * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue.
- * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to
- * compile from the compile queue, we also can detect stale methods for which the rate has been
- * 0 for some time in the same iteration. Stale methods can appear in the queue when an application
- * abruptly changes its behavior.
- *
- * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking
- *   to a given level. For example, it's useful to set TieredStopAtLevel = 1 in order to compile everything
- *   with pure C1.
- *
- * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the
- * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the
- * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled
- *   version in time. This reduces the overall time to reach level 4 and decreases the startup time.
- *   Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long
- *   there is no reason to start profiling prematurely.
- *
- * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation.
- * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered
- * to be zero if no events occurred in TieredRateUpdateMaxTime.
- */
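As a rough illustration of the predicate and load-feedback arithmetic described above, here is a minimal Java sketch with made-up counter values and thresholds loosely modeled on the Tier3 defaults (it is not the HotSpot implementation):

    // Sketch of the tiered transition predicate and scaling coefficient described above.
    public class ThresholdSketch {
        // s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1
        static double scale(int queueSize, int loadFeedback, int compilerCount) {
            return (double) queueSize / (loadFeedback * compilerCount) + 1;
        }

        // i > TierXInvocationThreshold * s
        //   || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s)
        static boolean callPredicate(long i, long b, double s,
                                     long invocationThreshold,
                                     long minInvocationThreshold,
                                     long compileThreshold) {
            return i > invocationThreshold * s
                    || (i > minInvocationThreshold * s && i + b > compileThreshold * s);
        }

        public static void main(String[] args) {
            // Illustrative numbers only: thresholds 200/100/2000, load feedback 5, two compiler threads.
            double idle = scale(0, 5, 2);   // empty queue -> s = 1.0
            double busy = scale(30, 5, 2);  // 30 queued   -> s = 4.0
            System.out.println("idle queue, fires: " + callPredicate(600, 3000, idle, 200, 100, 2000)); // true
            System.out.println("busy queue, fires: " + callPredicate(600, 3000, busy, 200, 100, 2000)); // false
        }
    }

The same counters that trigger a transition when the queues are empty do not trigger it under load, which is exactly the additional filtering the Tier?LoadFeedback options are meant to provide.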
-
-class TieredThresholdPolicy : public CompilationPolicy {
- jlong _start_time;
- int _c1_count, _c2_count;
-
- // Check if the counter is big enough and set carry (effectively infinity).
- inline void set_carry_if_necessary(InvocationCounter *counter);
- // Set carry flags in the counters (in Method* and MDO).
- inline void handle_counter_overflow(Method* method);
- // Call and loop predicates determine whether a transition to a higher compilation
- // level should be performed (pointers to predicate functions are passed to common()).
- // Predicates also take compiler load into account.
- typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
- bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
- bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
- // Common transition function. Given a predicate determines if a method should transition to another level.
- CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
- // Transition functions.
- // call_event determines if a method should be compiled at a different
- // level with a regular invocation entry.
- CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread);
- // loop_event checks if a method should be OSR compiled at a different
- // level.
- CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread);
- void print_counters(const char* prefix, const methodHandle& mh);
- // Has a method been around for long?
- // We don't remove old methods from the compile queue even if they have
- // very low activity (see select_task()).
- inline bool is_old(Method* method);
- // Was a given method inactive for a given number of milliseconds.
- // If it is, we would remove it from the queue (see select_task()).
- inline bool is_stale(jlong t, jlong timeout, Method* m);
- // Compute the weight of the method for the compilation scheduling
- inline double weight(Method* method);
- // Apply heuristics and return true if x should be compiled before y
- inline bool compare_methods(Method* x, Method* y);
- // Compute event rate for a given method. The rate is the number of events (invocations + backedges)
- // per millisecond.
- inline void update_rate(jlong t, Method* m);
- // Compute threshold scaling coefficient
- inline double threshold_scale(CompLevel level, int feedback_k);
- // If a method is old enough and is still in the interpreter we would want to
- // start profiling without waiting for the compiled method to arrive. This function
- // determines whether we should do that.
- inline bool should_create_mdo(Method* method, CompLevel cur_level);
- // Create MDO if necessary.
- void create_mdo(const methodHandle& mh, JavaThread* thread);
- // Is method profiled enough?
- bool is_method_profiled(Method* method);
-
- double _increase_threshold_at_ratio;
-
- bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread);
-
-protected:
- int c1_count() const { return _c1_count; }
- int c2_count() const { return _c2_count; }
- void set_c1_count(int x) { _c1_count = x; }
- void set_c2_count(int x) { _c2_count = x; }
-
- enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
- void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level);
- // Print policy-specific information if necessary
- virtual void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level);
- // Check if the method can be compiled, change level if necessary
- void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
- // Submit a given method for compilation
- virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
- // Simple methods are as good compiled with C1 as with C2.
- // This function tells whether a given method is such a method.
- inline static bool is_trivial(Method* method);
- // Force method to be compiled at CompLevel_simple?
- inline static bool should_compile_at_level_simple(Method* method);
-
- // Predicate helpers are used by .*_predicate() methods as well as others.
- // They check the given counter values, multiplied by the scale against the thresholds.
- template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
- template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
-
- // Get a compilation level for a given method.
- static CompLevel comp_level(Method* method);
- virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
- CompLevel level, CompiledMethod* nm, JavaThread* thread);
- virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
- int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread);
-
- void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
- void set_start_time(jlong t) { _start_time = t; }
- jlong start_time() const { return _start_time; }
-
-public:
- TieredThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { }
- virtual int compiler_count(CompLevel comp_level) {
- if (is_c1_compile(comp_level)) return c1_count();
- if (is_c2_compile(comp_level)) return c2_count();
- return 0;
- }
- virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); }
- virtual void do_safepoint_work() { }
- virtual void delay_compilation(Method* method) { }
- virtual void disable_compilation(Method* method) { }
- virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
- virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee,
- int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
- // Select task is called by CompileBroker. We should return a task or NULL.
- virtual CompileTask* select_task(CompileQueue* compile_queue);
- // Tell the runtime if we think a given method is adequately profiled.
- virtual bool is_mature(Method* method);
- // Initialize: set compiler thread count
- virtual void initialize();
- virtual bool should_not_inline(ciEnv* env, ciMethod* callee);
-};
-
-#endif // TIERED
-
-#endif // SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP
--- a/src/hotspot/share/runtime/vmStructs.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/vmStructs.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -88,6 +88,7 @@
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/notificationThread.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/serviceThread.hpp"
@@ -1366,6 +1367,7 @@
declare_type(JavaThread, Thread) \
declare_type(JvmtiAgentThread, JavaThread) \
declare_type(ServiceThread, JavaThread) \
+ declare_type(NotificationThread, JavaThread) \
declare_type(CompilerThread, JavaThread) \
declare_type(CodeCacheSweeperThread, JavaThread) \
declare_toplevel_type(OSThread) \
--- a/src/hotspot/share/runtime/vmThread.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/runtime/vmThread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -354,9 +354,9 @@
void VMThread::wait_for_vm_thread_exit() {
assert(Thread::current()->is_Java_thread(), "Should be a JavaThread");
assert(((JavaThread*)Thread::current())->is_terminated(), "Should be terminated");
- { MutexLocker mu(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
+ { MonitorLocker mu(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
_should_terminate = true;
- VMOperationQueue_lock->notify();
+ mu.notify();
}
// Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
@@ -620,8 +620,8 @@
//
// Notify (potential) waiting Java thread(s)
- { MutexLocker mu(VMOperationRequest_lock, Mutex::_no_safepoint_check_flag);
- VMOperationRequest_lock->notify_all();
+ { MonitorLocker mu(VMOperationRequest_lock, Mutex::_no_safepoint_check_flag);
+ mu.notify_all();
}
// We want to make sure that we get to a safepoint regularly
@@ -695,12 +695,11 @@
// VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
// to be queued up during a safepoint synchronization.
{
- VMOperationQueue_lock->lock_without_safepoint_check();
+ MonitorLocker ml(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
log_debug(vmthread)("Adding VM operation: %s", op->name());
_vm_queue->add(op);
op->set_timestamp(os::javaTimeMillis());
- VMOperationQueue_lock->notify();
- VMOperationQueue_lock->unlock();
+ ml.notify();
}
if (!concurrent) {
--- a/src/hotspot/share/services/diagnosticFramework.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/diagnosticFramework.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -437,9 +437,9 @@
}
void DCmdFactory::push_jmx_notification_request() {
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
_has_pending_jmx_notification = true;
- Service_lock->notify_all();
+ Notification_lock->notify_all();
}
void DCmdFactory::send_notification(TRAPS) {
@@ -455,7 +455,7 @@
HandleMark hm(THREAD);
bool notif = false;
{
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
notif = _has_pending_jmx_notification;
_has_pending_jmx_notification = false;
}
--- a/src/hotspot/share/services/gcNotifier.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/gcNotifier.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -54,18 +54,18 @@
}
void GCNotifier::addRequest(GCNotificationRequest *request) {
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
if(first_request == NULL) {
first_request = request;
} else {
last_request->next = request;
}
last_request = request;
- Service_lock->notify_all();
+ Notification_lock->notify_all();
}
GCNotificationRequest *GCNotifier::getRequest() {
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
GCNotificationRequest *request = first_request;
if(first_request != NULL) {
first_request = first_request->next;
--- a/src/hotspot/share/services/lowMemoryDetector.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/lowMemoryDetector.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -40,7 +40,7 @@
volatile jint LowMemoryDetector::_disabled_count = 0;
bool LowMemoryDetector::has_pending_requests() {
- assert(Service_lock->owned_by_self(), "Must own Service_lock");
+ assert(Notification_lock->owned_by_self(), "Must own Notification_lock");
bool has_requests = false;
int num_memory_pools = MemoryService::num_memory_pools();
for (int i = 0; i < num_memory_pools; i++) {
@@ -62,7 +62,7 @@
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
- // No need to hold Service_lock to call out to Java
+ // No need to hold Notification_lock to call out to Java
int num_memory_pools = MemoryService::num_memory_pools();
for (int i = 0; i < num_memory_pools; i++) {
MemoryPool* pool = MemoryService::get_memory_pool(i);
@@ -80,7 +80,7 @@
// This method could be called from any Java threads
// and also VMThread.
void LowMemoryDetector::detect_low_memory() {
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
bool has_pending_requests = false;
int num_memory_pools = MemoryService::num_memory_pools();
@@ -98,7 +98,7 @@
}
if (has_pending_requests) {
- Service_lock->notify_all();
+ Notification_lock->notify_all();
}
}
@@ -113,14 +113,14 @@
}
{
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
MemoryUsage usage = pool->get_memory_usage();
sensor->set_gauge_sensor_level(usage,
pool->usage_threshold());
if (sensor->has_pending_requests()) {
// notify sensor state update
- Service_lock->notify_all();
+ Notification_lock->notify_all();
}
}
}
@@ -135,14 +135,14 @@
}
{
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
MemoryUsage usage = pool->get_last_collection_usage();
sensor->set_counter_sensor_level(usage, pool->gc_usage_threshold());
if (sensor->has_pending_requests()) {
// notify sensor state update
- Service_lock->notify_all();
+ Notification_lock->notify_all();
}
}
}
@@ -205,7 +205,7 @@
// If the current level is between high and low threshold, no change.
//
void SensorInfo::set_gauge_sensor_level(MemoryUsage usage, ThresholdSupport* high_low_threshold) {
- assert(Service_lock->owned_by_self(), "Must own Service_lock");
+ assert(Notification_lock->owned_by_self(), "Must own Notification_lock");
assert(high_low_threshold->is_high_threshold_supported(), "just checking");
bool is_over_high = high_low_threshold->is_high_threshold_crossed(usage);
@@ -260,7 +260,7 @@
// the sensor will be on (i.e. sensor is currently off
// and has pending trigger requests).
void SensorInfo::set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold) {
- assert(Service_lock->owned_by_self(), "Must own Service_lock");
+ assert(Notification_lock->owned_by_self(), "Must own Notification_lock");
assert(counter_threshold->is_high_threshold_supported(), "just checking");
bool is_over_high = counter_threshold->is_high_threshold_crossed(usage);
@@ -334,8 +334,8 @@
}
{
- // Holds Service_lock and update the sensor state
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ // Holds Notification_lock and update the sensor state
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
assert(_pending_trigger_count > 0, "Must have pending trigger");
_sensor_on = true;
_sensor_count += count;
@@ -345,8 +345,8 @@
void SensorInfo::clear(int count, TRAPS) {
{
- // Holds Service_lock and update the sensor state
- MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ // Holds Notification_lock and update the sensor state
+ MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag);
if (_pending_clear_count == 0) {
// Bail out if we lost a race to set_*_sensor_level() which may have
// reactivated the sensor in the meantime because it was triggered again.
--- a/src/hotspot/share/services/lowMemoryDetector.hpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/lowMemoryDetector.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -59,7 +59,8 @@
//
// May need to deal with hysteresis effect.
//
-// Memory detection code runs in the Service thread (serviceThread.hpp).
+// Memory detection code runs in the NotificationThread or the
+// ServiceThread, depending on the UseNotificationThread flag.
class OopClosure;
class MemoryPool;
@@ -214,6 +215,7 @@
class LowMemoryDetector : public AllStatic {
friend class LowMemoryDetectorDisabler;
friend class ServiceThread;
+ friend class NotificationThread;
private:
// true if any collected heap has low memory detection enabled
static volatile bool _enabled_for_collected_pools;
--- a/src/hotspot/share/services/management.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/management.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -44,6 +44,7 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
+#include "runtime/notificationThread.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/thread.inline.hpp"
@@ -148,7 +149,9 @@
void Management::initialize(TRAPS) {
// Start the service thread
ServiceThread::initialize();
-
+ if (UseNotificationThread) {
+ NotificationThread::initialize();
+ }
if (ManagementServer) {
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
@@ -2068,6 +2071,31 @@
}
#endif // INCLUDE_MANAGEMENT
+// Gets the amount of memory allocated on the Java heap for a single thread.
+// Returns -1 if the thread does not exist or has terminated.
+JVM_ENTRY(jlong, jmm_GetOneThreadAllocatedMemory(JNIEnv *env, jlong thread_id))
+ if (thread_id < 0) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+ "Invalid thread ID", -1);
+ }
+
+ if (thread_id == 0) {
+ // current thread
+ if (THREAD->is_Java_thread()) {
+ return ((JavaThread*)THREAD)->cooked_allocated_bytes();
+ }
+ return -1;
+ }
+
+ ThreadsListHandle tlh;
+ JavaThread* java_thread = tlh.list()->find_JavaThread_from_java_tid(thread_id);
+
+ if (java_thread != NULL) {
+ return java_thread->cooked_allocated_bytes();
+ }
+ return -1;
+JVM_END
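This new entry point serves single-thread queries of allocated bytes; from the Java side the value is surfaced through the existing com.sun.management.ThreadMXBean API. A minimal usage sketch (assumes a HotSpot-based JDK where thread allocation measurement is supported and enabled):

    import java.lang.management.ManagementFactory;

    // Usage sketch: query the heap bytes allocated by a single thread.
    public class AllocatedBytesSketch {
        public static void main(String[] args) {
            com.sun.management.ThreadMXBean bean =
                    (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();
            if (!bean.isThreadAllocatedMemorySupported()) {
                System.out.println("thread allocation measurement not supported");
                return;
            }
            byte[] data = new byte[1 << 20];   // allocate ~1 MiB on this thread
            long bytes = bean.getThreadAllocatedBytes(Thread.currentThread().getId());
            System.out.println("allocated so far: " + bytes + " bytes (buffer length " + data.length + ")");
        }
    }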
+
// Gets an array containing the amount of memory allocated on the Java
// heap for a set of threads (in bytes). Each element of the array is
// the amount of memory allocated for the thread ID specified in the
@@ -2192,6 +2220,7 @@
jmm_GetMemoryManagers,
jmm_GetMemoryPoolUsage,
jmm_GetPeakMemoryPoolUsage,
+ jmm_GetOneThreadAllocatedMemory,
jmm_GetThreadAllocatedMemory,
jmm_GetMemoryUsage,
jmm_GetLongAttribute,
--- a/src/hotspot/share/services/memoryManager.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/memoryManager.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -178,7 +178,7 @@
_num_collections = 0;
_last_gc_stat = NULL;
_last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true,
- Monitor::_safepoint_check_never);
+ Mutex::_safepoint_check_never);
_current_gc_stat = NULL;
_num_gc_threads = 1;
_notification_enabled = false;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/threadIdTable.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,259 @@
+
+/*
+* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#include "precompiled.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
+#include "runtime/timerTrace.hpp"
+#include "services/threadIdTable.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
+
+
+typedef ConcurrentHashTable<ThreadIdTableConfig, mtInternal> ThreadIdTableHash;
+
+// 2^24 is max size
+static const size_t END_SIZE = 24;
+// Default initial size 256
+static const size_t DEFAULT_TABLE_SIZE_LOG = 8;
+// Prefer short chains of avg 2
+static const double PREF_AVG_LIST_LEN = 2.0;
+static ThreadIdTableHash* volatile _local_table = NULL;
+static volatile size_t _current_size = 0;
+static volatile size_t _items_count = 0;
+
+volatile bool ThreadIdTable::_is_initialized = false;
+volatile bool ThreadIdTable::_has_work = false;
+
+class ThreadIdTableEntry : public CHeapObj<mtInternal> {
+private:
+ jlong _tid;
+ JavaThread* _java_thread;
+public:
+ ThreadIdTableEntry(jlong tid, JavaThread* java_thread) :
+ _tid(tid), _java_thread(java_thread) {}
+
+ jlong tid() const { return _tid; }
+ JavaThread* thread() const { return _java_thread; }
+};
+
+class ThreadIdTableConfig : public AllStatic {
+ public:
+ typedef ThreadIdTableEntry* Value;
+
+ static uintx get_hash(Value const& value, bool* is_dead) {
+ jlong tid = value->tid();
+ return primitive_hash(tid);
+ }
+ static void* allocate_node(size_t size, Value const& value) {
+ ThreadIdTable::item_added();
+ return AllocateHeap(size, mtInternal);
+ }
+ static void free_node(void* memory, Value const& value) {
+ delete value;
+ FreeHeap(memory);
+ ThreadIdTable::item_removed();
+ }
+};
+
+static size_t ceil_log2(size_t val) {
+ size_t ret;
+ for (ret = 1; ((size_t)1 << ret) < val; ++ret);
+ return ret;
+}
+
+// Lazily creates the table and populates it with the given
+// thread list
+void ThreadIdTable::lazy_initialize(const ThreadsList *threads) {
+ if (!_is_initialized) {
+ {
+ // There are no obvious benefits in allowing the thread table
+ // to be concurrently populated during the initialization.
+ MutexLocker ml(ThreadIdTableCreate_lock);
+ if (_is_initialized) {
+ return;
+ }
+ create_table(threads->length());
+ _is_initialized = true;
+ }
+ for (uint i = 0; i < threads->length(); i++) {
+ JavaThread* thread = threads->thread_at(i);
+ oop tobj = thread->threadObj();
+ if (tobj != NULL) {
+ jlong java_tid = java_lang_Thread::thread_id(tobj);
+ MutexLocker ml(Threads_lock);
+ if (!thread->is_exiting()) {
+ // Must be inside the lock to ensure that we don't add a thread to the table
+ // that has just passed the removal point in ThreadsSMRSupport::remove_thread()
+ add_thread(java_tid, thread);
+ }
+ }
+ }
+ }
+}
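The double-checked, lock-guarded lazy initialization above can be mirrored in plain Java; the sketch below uses a ConcurrentHashMap in place of the ConcurrentHashTable and hypothetical names (ThreadIdMapSketch, lazyInitialize). Unlike the HotSpot code, which marks the table initialized first and then takes Threads_lock per thread while seeding it, this sketch simply publishes the map only after it is fully populated.

    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical Java analogue of the lazily created tid -> thread table.
    public class ThreadIdMapSketch {
        private static volatile ConcurrentHashMap<Long, Thread> table;

        // Create the table on first use and seed it from a snapshot of live threads.
        static void lazyInitialize(List<Thread> threads) {
            if (table == null) {
                synchronized (ThreadIdMapSketch.class) {
                    if (table == null) {
                        ConcurrentHashMap<Long, Thread> t =
                                new ConcurrentHashMap<>(Math.max(256, threads.size()));
                        for (Thread thread : threads) {
                            if (thread.isAlive()) {
                                t.put(thread.getId(), thread);
                            }
                        }
                        table = t;   // publish the fully populated table
                    }
                }
            }
        }

        static Thread findThreadByTid(long tid) {
            ConcurrentHashMap<Long, Thread> t = table;
            return (t == null) ? null : t.get(tid);
        }

        public static void main(String[] args) {
            lazyInitialize(List.of(Thread.currentThread()));
            System.out.println(findThreadByTid(Thread.currentThread().getId()));
        }
    }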
+
+void ThreadIdTable::create_table(size_t size) {
+ assert(_local_table == NULL, "Thread table is already created");
+ size_t size_log = ceil_log2(size);
+ size_t start_size_log =
+ size_log > DEFAULT_TABLE_SIZE_LOG ? size_log : DEFAULT_TABLE_SIZE_LOG;
+ _current_size = (size_t)1 << start_size_log;
+ _local_table = new ThreadIdTableHash(start_size_log, END_SIZE);
+}
+
+void ThreadIdTable::item_added() {
+ Atomic::inc(&_items_count);
+ log_trace(thread, table) ("Thread entry added");
+}
+
+void ThreadIdTable::item_removed() {
+ Atomic::dec(&_items_count);
+ log_trace(thread, table) ("Thread entry removed");
+}
+
+double ThreadIdTable::get_load_factor() {
+ return ((double)_items_count) / _current_size;
+}
+
+size_t ThreadIdTable::table_size() {
+ return (size_t)1 << _local_table->get_size_log2(Thread::current());
+}
+
+void ThreadIdTable::check_concurrent_work() {
+ if (_has_work) {
+ return;
+ }
+
+ double load_factor = get_load_factor();
+ // Resize if we have more items than preferred load factor
+ if ( load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) {
+ log_debug(thread, table)("Concurrent work triggered, load factor: %g",
+ load_factor);
+ trigger_concurrent_work();
+ }
+}
+
+void ThreadIdTable::trigger_concurrent_work() {
+ MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ _has_work = true;
+ Service_lock->notify_all();
+}
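The check/trigger pair above follows a common pattern: compute the load factor cheaply on the insertion path and, if it exceeds the preferred chain length, set a flag and wake a maintenance thread (in HotSpot, the service thread waiting on Service_lock). A hedged Java sketch of the same pattern, with hypothetical names:

    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantLock;

    // Hypothetical sketch: flag pending work and wake a maintenance thread.
    public class ConcurrentWorkSketch {
        static final double PREF_AVG_LIST_LEN = 2.0;

        private final ReentrantLock lock = new ReentrantLock();
        private final Condition hasWorkCond = lock.newCondition();
        private volatile boolean hasWork;   // volatile: checked without the lock on the insertion path

        void checkConcurrentWork(long items, long buckets, boolean maxSizeReached) {
            if (hasWork) {
                return;                                   // work already pending
            }
            double loadFactor = (double) items / buckets;
            if (loadFactor > PREF_AVG_LIST_LEN && !maxSizeReached) {
                triggerConcurrentWork();
            }
        }

        void triggerConcurrentWork() {
            lock.lock();
            try {
                hasWork = true;
                hasWorkCond.signalAll();                  // wake the maintenance thread
            } finally {
                lock.unlock();
            }
        }

        // Run on a dedicated background thread: wait until work is flagged, then grow the table.
        void maintenanceLoop() throws InterruptedException {
            lock.lock();
            try {
                while (!hasWork) {
                    hasWorkCond.await();
                }
                hasWork = false;
            } finally {
                lock.unlock();
            }
            System.out.println("growing table...");
        }
    }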
+
+void ThreadIdTable::grow(JavaThread* jt) {
+ ThreadIdTableHash::GrowTask gt(_local_table);
+ if (!gt.prepare(jt)) {
+ return;
+ }
+ log_trace(thread, table)("Started to grow");
+ TraceTime timer("Grow", TRACETIME_LOG(Debug, membername, table, perf));
+ while (gt.do_task(jt)) {
+ gt.pause(jt);
+ {
+ ThreadBlockInVM tbivm(jt);
+ }
+ gt.cont(jt);
+ }
+ gt.done(jt);
+ _current_size = table_size();
+ log_info(thread, table)("Grown to size:" SIZE_FORMAT, _current_size);
+}
+
+class ThreadIdTableLookup : public StackObj {
+private:
+ jlong _tid;
+ uintx _hash;
+public:
+ ThreadIdTableLookup(jlong tid)
+ : _tid(tid), _hash(primitive_hash(tid)) {}
+ uintx get_hash() const {
+ return _hash;
+ }
+ bool equals(ThreadIdTableEntry** value, bool* is_dead) {
+ bool equals = primitive_equals(_tid, (*value)->tid());
+ if (!equals) {
+ return false;
+ }
+ return true;
+ }
+};
+
+class ThreadGet : public StackObj {
+private:
+ JavaThread* _return;
+public:
+ ThreadGet(): _return(NULL) {}
+ void operator()(ThreadIdTableEntry** val) {
+ _return = (*val)->thread();
+ }
+ JavaThread* get_res_thread() {
+ return _return;
+ }
+};
+
+void ThreadIdTable::do_concurrent_work(JavaThread* jt) {
+ assert(_is_initialized, "Thread table is not initialized");
+ _has_work = false;
+ double load_factor = get_load_factor();
+ log_debug(thread, table)("Concurrent work, load factor: %g", load_factor);
+ if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) {
+ grow(jt);
+ }
+}
+
+JavaThread* ThreadIdTable::add_thread(jlong tid, JavaThread* java_thread) {
+ assert(_is_initialized, "Thread table is not initialized");
+ Thread* thread = Thread::current();
+ ThreadIdTableLookup lookup(tid);
+ ThreadGet tg;
+ while (true) {
+ if (_local_table->get(thread, lookup, tg)) {
+ return tg.get_res_thread();
+ }
+ ThreadIdTableEntry* entry = new ThreadIdTableEntry(tid, java_thread);
+ // The hash table takes ownership of the ThreadIdTableEntry,
+ // even if it's not inserted.
+ if (_local_table->insert(thread, lookup, entry)) {
+ check_concurrent_work();
+ return java_thread;
+ }
+ }
+}
+
+JavaThread* ThreadIdTable::find_thread_by_tid(jlong tid) {
+ assert(_is_initialized, "Thread table is not initialized");
+ Thread* thread = Thread::current();
+ ThreadIdTableLookup lookup(tid);
+ ThreadGet tg;
+ _local_table->get(thread, lookup, tg);
+ return tg.get_res_thread();
+}
+
+bool ThreadIdTable::remove_thread(jlong tid) {
+ assert(_is_initialized, "Thread table is not initialized");
+ Thread* thread = Thread::current();
+ ThreadIdTableLookup lookup(tid);
+ return _local_table->remove(thread, lookup);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/services/threadIdTable.hpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,68 @@
+
+/*
+* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#ifndef SHARE_SERVICES_THREADIDTABLE_HPP
+#define SHARE_SERVICES_THREADIDTABLE_HPP
+
+#include "memory/allocation.hpp"
+
+class JavaThread;
+class ThreadsList;
+class ThreadIdTableConfig;
+
+class ThreadIdTable : public AllStatic {
+ friend class ThreadIdTableConfig;
+
+ static volatile bool _is_initialized;
+ static volatile bool _has_work;
+
+public:
+ // Initialization
+ static void lazy_initialize(const ThreadsList* threads);
+ static bool is_initialized() { return _is_initialized; }
+
+ // Lookup and list management
+ static JavaThread* find_thread_by_tid(jlong tid);
+ static JavaThread* add_thread(jlong tid, JavaThread* thread);
+ static bool remove_thread(jlong tid);
+
+ // Growing
+ static bool has_work() { return _has_work; }
+ static void do_concurrent_work(JavaThread* jt);
+
+private:
+ static void create_table(size_t size);
+
+ static size_t table_size();
+ static double get_load_factor();
+ static void check_concurrent_work();
+ static void trigger_concurrent_work();
+ static void grow(JavaThread* jt);
+
+ static void item_added();
+ static void item_removed();
+};
+
+#endif // SHARE_SERVICES_THREADIDTABLE_HPP
--- a/src/hotspot/share/services/threadService.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/src/hotspot/share/services/threadService.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -32,6 +32,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@@ -217,10 +218,10 @@
} else {
ObjectMonitor *enter_obj = thread->current_pending_monitor();
if (enter_obj != NULL) {
- // thread is trying to enter() or raw_enter() an ObjectMonitor.
+ // thread is trying to enter() an ObjectMonitor.
obj = (oop) enter_obj->object();
+ assert(obj != NULL, "ObjectMonitor should have an associated object!");
}
- // If obj == NULL, then ObjectMonitor is raw which doesn't count.
}
Handle h(Thread::current(), obj);
@@ -354,13 +355,15 @@
}
}
-// Find deadlocks involving object monitors and concurrent locks if concurrent_locks is true
+// Find deadlocks involving raw monitors, object monitors and concurrent locks
+// if concurrent_locks is true.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
// This code was modified from the original Threads::find_deadlocks code.
int globalDfn = 0, thisDfn;
ObjectMonitor* waitingToLockMonitor = NULL;
+ JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
oop waitingToLockBlocker = NULL;
bool blocked_on_monitor = false;
JavaThread *currentThread, *previousThread;
@@ -391,13 +394,30 @@
// When there is a deadlock, all the monitors involved in the dependency
// cycle must be contended and heavyweight. So we only care about the
// heavyweight monitor a thread is waiting to lock.
- waitingToLockMonitor = (ObjectMonitor*)jt->current_pending_monitor();
+ waitingToLockMonitor = jt->current_pending_monitor();
+ // JVM TI raw monitors can also be involved in deadlocks, and we can be
+ // waiting to lock both a raw monitor and ObjectMonitor at the same time.
+ // It isn't clear how to make deadlock detection work correctly if that
+ // happens.
+ waitingToLockRawMonitor = jt->current_pending_raw_monitor();
+
if (concurrent_locks) {
waitingToLockBlocker = jt->current_park_blocker();
}
- while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
+
+ while (waitingToLockMonitor != NULL ||
+ waitingToLockRawMonitor != NULL ||
+ waitingToLockBlocker != NULL) {
cycle->add_thread(currentThread);
- if (waitingToLockMonitor != NULL) {
+ // Give preference to the raw monitor
+ if (waitingToLockRawMonitor != NULL) {
+ Thread* owner = waitingToLockRawMonitor->owner();
+ if (owner != NULL && // the raw monitor could be released at any time
+ owner->is_Java_thread()) {
+ // only JavaThreads can be reported here
+ currentThread = (JavaThread*) owner;
+ }
+ } else if (waitingToLockMonitor != NULL) {
address currentOwner = (address)waitingToLockMonitor->owner();
if (currentOwner != NULL) {
currentThread = Threads::owning_thread_from_monitor_owner(t_list,
@@ -948,28 +968,44 @@
JavaThread* currentThread;
ObjectMonitor* waitingToLockMonitor;
+ JvmtiRawMonitor* waitingToLockRawMonitor;
oop waitingToLockBlocker;
int len = _threads->length();
for (int i = 0; i < len; i++) {
currentThread = _threads->at(i);
- waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
+ waitingToLockMonitor = currentThread->current_pending_monitor();
+ waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
waitingToLockBlocker = currentThread->current_park_blocker();
st->cr();
st->print_cr("\"%s\":", currentThread->get_thread_name());
const char* owner_desc = ",\n which is held by";
+
+ // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
+ // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
+ if (waitingToLockRawMonitor != NULL) {
+ st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
+ Thread* owner = waitingToLockRawMonitor->owner();
+ // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
+ if (owner != NULL) {
+ if (owner->is_Java_thread()) {
+ currentThread = (JavaThread*) owner;
+ st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
+ } else {
+ st->print_cr(",\n which has now been released");
+ }
+ } else {
+ st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
+ }
+ }
+
if (waitingToLockMonitor != NULL) {
st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
oop obj = (oop)waitingToLockMonitor->object();
- if (obj != NULL) {
- st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
- obj->klass()->external_name());
+ st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
+ obj->klass()->external_name());
- if (!currentThread->current_pending_monitor_is_from_java()) {
- owner_desc = "\n in JNI, which is held by";
- }
- } else {
- // No Java object associated - a JVMTI raw monitor
- owner_desc = " (JVMTI raw monitor),\n which is held by";
+ if (!currentThread->current_pending_monitor_is_from_java()) {
+ owner_desc = "\n in JNI, which is held by";
}
currentThread = Threads::owning_thread_from_monitor_owner(t_list,
(address)waitingToLockMonitor->owner());
@@ -978,7 +1014,7 @@
// that owns waitingToLockMonitor should be findable, but
// if it is not findable, then the previous currentThread is
// blocked permanently.
- st->print("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
+ st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
p2i(waitingToLockMonitor->owner()));
continue;
}
@@ -992,11 +1028,10 @@
currentThread = java_lang_Thread::thread(ownerObj);
assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
}
- st->print("%s \"%s\"", owner_desc, currentThread->get_thread_name());
+ st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
}
st->cr();
- st->cr();
// Print stack traces
bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
--- a/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,10 +79,26 @@
ArrayList<UnixMountEntry> entries = new ArrayList<>();
try {
long fp = setmntent(Util.toBytes(fstab), Util.toBytes("r"));
+ int maxLineSize = 1024;
+ try {
+ for (;;) {
+ int lineSize = getlinelen(fp);
+ if (lineSize == -1)
+ break;
+ if (lineSize > maxLineSize)
+ maxLineSize = lineSize;
+ }
+ } catch (UnixException x) {
+ // nothing we need to do
+ } finally {
+ rewind(fp);
+ }
+
try {
for (;;) {
UnixMountEntry entry = new UnixMountEntry();
- int res = getmntent(fp, entry);
+ // account for the NUL character at the end
+ int res = getmntent(fp, entry, maxLineSize + 1);
if (res < 0)
break;
entries.add(entry);
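The added Java code above sizes the native getmntent_r() buffer by first scanning the mount table for its longest line, rewinding, and then parsing with a buffer that is guaranteed to fit. The same two-pass pattern in plain Java on an ordinary text file (a sketch, not the JDK's native-backed code):

    import java.io.IOException;
    import java.io.RandomAccessFile;

    // Sketch of the two-pass pattern: measure the longest line, rewind, re-read with a fitted buffer.
    public class LongestLineSketch {
        public static void main(String[] args) throws IOException {
            String path = args.length > 0 ? args[0] : "/etc/mtab";
            try (RandomAccessFile f = new RandomAccessFile(path, "r")) {
                int maxLineSize = 1024;
                String line;
                while ((line = f.readLine()) != null) {
                    maxLineSize = Math.max(maxLineSize, line.length());
                }
                f.seek(0);                                // rewind, like rewind(fp) in the code above
                byte[] buf = new byte[maxLineSize + 1];   // + 1 for the trailing NUL expected by the C API
                int entries = 0;
                while (f.readLine() != null) {
                    entries++;                            // a real parser would fill buf per entry here
                }
                System.out.println(entries + " entries, buffer size " + buf.length);
            }
        }
    }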
--- a/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java Fri Oct 11 12:08:01 2019 +0530
@@ -51,7 +51,17 @@
/**
* int getmntent(FILE *fp, struct mnttab *mp, int len);
*/
- static native int getmntent(long fp, UnixMountEntry entry)
+
+ static int getmntent(long fp, UnixMountEntry entry, int buflen) throws UnixException {
+ NativeBuffer buffer = NativeBuffers.getNativeBuffer(buflen);
+ try {
+ return getmntent0(fp, entry, buffer.address(), buflen);
+ } finally {
+ buffer.release();
+ }
+ }
+
+ static native int getmntent0(long fp, UnixMountEntry entry, long buffer, int bufLen)
throws UnixException;
/**
--- a/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -169,12 +169,11 @@
}
JNIEXPORT jint JNICALL
-Java_sun_nio_fs_LinuxNativeDispatcher_getmntent(JNIEnv* env, jclass this,
- jlong value, jobject entry)
+Java_sun_nio_fs_LinuxNativeDispatcher_getmntent0(JNIEnv* env, jclass this,
+ jlong value, jobject entry, jlong buffer, jint bufLen)
{
struct mntent ent;
- char buf[1024];
- int buflen = sizeof(buf);
+ char * buf = (char*)jlong_to_ptr(buffer);
struct mntent* m;
FILE* fp = jlong_to_ptr(value);
jsize len;
@@ -184,7 +183,7 @@
char* fstype;
char* options;
- m = getmntent_r(fp, &ent, (char*)&buf, buflen);
+ m = getmntent_r(fp, &ent, buf, (int)bufLen);
if (m == NULL)
return -1;
name = m->mnt_fsname;
--- a/src/java.base/share/classes/java/io/BufferedReader.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/io/BufferedReader.java Fri Oct 11 12:08:01 2019 +0530
@@ -314,7 +314,7 @@
* @throws IOException If an I/O error occurs
*/
String readLine(boolean ignoreLF, boolean[] term) throws IOException {
- StringBuffer s = null;
+ StringBuilder s = null;
int startChar;
synchronized (lock) {
@@ -372,7 +372,7 @@
}
if (s == null)
- s = new StringBuffer(defaultExpectedLineLength);
+ s = new StringBuilder(defaultExpectedLineLength);
s.append(cb, startChar, i - startChar);
}
}
--- a/src/java.base/share/classes/java/io/FilePermission.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/io/FilePermission.java Fri Oct 11 12:08:01 2019 +0530
@@ -1196,7 +1196,7 @@
if ((effective & desired) == desired) {
return true;
}
- needed = (desired ^ effective);
+ needed = (desired & ~effective);
}
}
return false;
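The replaced expression matters whenever the effective mask contains bits that were never requested: XOR would turn those extra bits into "still needed" bits, whereas AND-NOT keeps exactly the requested actions that are not yet granted. A small sketch with made-up mask values:

    // Why "desired & ~effective" rather than "desired ^ effective" (made-up mask values).
    public class MaskSketch {
        static final int READ = 0x1, WRITE = 0x2, EXECUTE = 0x4;

        public static void main(String[] args) {
            int desired   = READ | WRITE;       // actions we still have to satisfy
            int effective = WRITE | EXECUTE;    // granted so far: WRITE, plus EXECUTE we never asked for

            int xor    = desired ^ effective;   // READ | EXECUTE -> wrongly re-adds EXECUTE as "needed"
            int andNot = desired & ~effective;  // READ           -> exactly what is still missing

            System.out.printf("xor=0x%x andNot=0x%x%n", xor, andNot);
        }
    }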
--- a/src/java.base/share/classes/java/lang/Math.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/lang/Math.java Fri Oct 11 12:08:01 2019 +0530
@@ -440,6 +440,7 @@
* floating-point value that is greater than or equal to
* the argument and is equal to a mathematical integer.
*/
+ @HotSpotIntrinsicCandidate
public static double ceil(double a) {
return StrictMath.ceil(a); // default impl. delegates to StrictMath
}
@@ -459,6 +460,7 @@
* floating-point value that less than or equal to the argument
* and is equal to a mathematical integer.
*/
+ @HotSpotIntrinsicCandidate
public static double floor(double a) {
return StrictMath.floor(a); // default impl. delegates to StrictMath
}
@@ -478,6 +480,7 @@
* @return the closest floating-point value to {@code a} that is
* equal to a mathematical integer.
*/
+ @HotSpotIntrinsicCandidate
public static double rint(double a) {
return StrictMath.rint(a); // default impl. delegates to StrictMath
}
--- a/src/java.base/share/classes/java/lang/Throwable.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/lang/Throwable.java Fri Oct 11 12:08:01 2019 +0530
@@ -230,6 +230,7 @@
* @serial
* @since 1.7
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private List<Throwable> suppressedExceptions = SUPPRESSED_SENTINEL;
/** Message for trying to suppress a null exception. */
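Here and in the SerializedLambda and Proxy hunks below, the annotation documents that a non-transient field of a Serializable class is not statically typed as Serializable even though the values stored in it are expected to be, so that an expanded -Xlint:serial check does not flag the field. A minimal sketch of the pattern (class and field names are hypothetical):

    import java.io.Serializable;
    import java.util.List;

    // Hypothetical example: declared field types are not Serializable, actual values are expected to be.
    class EventRecord implements Serializable {
        private static final long serialVersionUID = 1L;

        @SuppressWarnings("serial") // Not statically typed as Serializable
        private final Object payload;

        @SuppressWarnings("serial") // Not statically typed as Serializable
        private final List<String> tags;

        EventRecord(Object payload, List<String> tags) {
            this.payload = payload;
            this.tags = tags;
        }
    }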
--- a/src/java.base/share/classes/java/lang/invoke/SerializedLambda.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/lang/invoke/SerializedLambda.java Fri Oct 11 12:08:01 2019 +0530
@@ -76,6 +76,7 @@
private final String implMethodSignature;
private final int implMethodKind;
private final String instantiatedMethodType;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Object[] capturedArgs;
/**
--- a/src/java.base/share/classes/java/lang/reflect/Proxy.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/lang/reflect/Proxy.java Fri Oct 11 12:08:01 2019 +0530
@@ -308,6 +308,7 @@
* the invocation handler for this proxy instance.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected InvocationHandler h;
/**
--- a/src/java.base/share/classes/java/net/DatagramSocket.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/net/DatagramSocket.java Fri Oct 11 12:08:01 2019 +0530
@@ -434,14 +434,15 @@
* verify that datagrams are permitted to be sent and received
* respectively.
*
- * <p> When a socket is connected, {@link #receive receive} and
- * {@link #send send} <b>will not perform any security checks</b>
- * on incoming and outgoing packets, other than matching the packet's
- * and the socket's address and port. On a send operation, if the
- * packet's address is set and the packet's address and the socket's
- * address do not match, an {@code IllegalArgumentException} will be
- * thrown. A socket connected to a multicast address may only be used
- * to send packets.
+ * <p> Care should be taken to ensure that a connected datagram socket
+ * is not shared with untrusted code. When a socket is connected,
+ * {@link #receive receive} and {@link #send send} <b>will not perform
+ * any security checks</b> on incoming and outgoing packets, other than
+ * matching the packet's and the socket's address and port. On a send
+ * operation, if the packet's address is set and the packet's address
+ * and the socket's address do not match, an {@code IllegalArgumentException}
+ * will be thrown. A socket connected to a multicast address may only
+ * be used to send packets.
*
* @param address the remote address for the socket
*
@@ -708,9 +709,11 @@
* the length of the received message. If the message is longer than
* the packet's length, the message is truncated.
* <p>
- * If there is a security manager, a packet cannot be received if the
- * security manager's {@code checkAccept} method
- * does not allow it.
+ * If there is a security manager, and the socket is not currently
+ * connected to a remote address, a packet cannot be received if the
+ * security manager's {@code checkAccept} method does not allow it.
+ * Datagrams that are not permitted by the security manager are silently
+ * discarded.
*
* @param p the {@code DatagramPacket} into which to place
* the incoming data.
@@ -896,12 +899,15 @@
*
* @param timeout the specified timeout in milliseconds.
* @throws SocketException if there is an error in the underlying protocol, such as an UDP error.
+ * @throws IllegalArgumentException if {@code timeout} is negative
* @since 1.1
* @see #getSoTimeout()
*/
public synchronized void setSoTimeout(int timeout) throws SocketException {
if (isClosed())
throw new SocketException("Socket is closed");
+ if (timeout < 0)
+ throw new IllegalArgumentException("timeout < 0");
getImpl().setOption(SocketOptions.SO_TIMEOUT, timeout);
}
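With the added argument check, a negative timeout now fails fast in DatagramSocket itself instead of being passed down to the socket implementation. A small usage sketch:

    import java.net.DatagramSocket;
    import java.net.SocketException;

    // Usage sketch: negative timeouts are rejected with IllegalArgumentException.
    public class SoTimeoutSketch {
        public static void main(String[] args) throws SocketException {
            try (DatagramSocket socket = new DatagramSocket()) {
                socket.setSoTimeout(250);          // fine: 250 ms receive timeout
                try {
                    socket.setSoTimeout(-1);       // rejected up front by the new check
                } catch (IllegalArgumentException expected) {
                    System.out.println("rejected: " + expected.getMessage());
                }
            }
        }
    }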
--- a/src/java.base/share/classes/java/net/SocketPermission.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/net/SocketPermission.java Fri Oct 11 12:08:01 2019 +0530
@@ -1433,7 +1433,7 @@
if ((effective & desired) == desired) {
return true;
}
- needed = (desired ^ effective);
+ needed = (desired & ~effective);
}
}
return false;
--- a/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -265,7 +265,10 @@
* java.lang.SecurityManager#checkAccept checkAccept} and {@link
* java.lang.SecurityManager#checkConnect checkConnect} methods permit
* datagrams to be received from and sent to, respectively, the given
- * remote address.
+ * remote address. Once connected, no further security checks are performed
+ * for datagrams received from, or sent to, the given remote address. Care
+ * should be taken to ensure that a connected datagram channel is not shared
+ * with untrusted code.
*
* <p> This method may be invoked at any time. It will not have any effect
* on read or write operations that are already in progress at the moment
@@ -325,6 +328,10 @@
* <p> If this channel's socket is not connected, or if the channel is
* closed, then invoking this method has no effect. </p>
*
+ * @apiNote If this method throws an IOException, the channel's socket
+ * may be left in an unspecified state. It is strongly recommended that
+ * the channel be closed when disconnect fails.
+ *
* @return This datagram channel
*
* @throws IOException
@@ -369,9 +376,10 @@
* to a specific remote address and a security manager has been installed
* then for each datagram received this method verifies that the source's
* address and port number are permitted by the security manager's {@link
- * java.lang.SecurityManager#checkAccept checkAccept} method. The overhead
- * of this security check can be avoided by first connecting the socket via
- * the {@link #connect connect} method.
+ * java.lang.SecurityManager#checkAccept checkAccept} method. Datagrams
+ * that are not permitted by the security manager are silently discarded.
+ * The overhead of this security check can be avoided by first connecting
+ * the socket via the {@link #connect connect} method.
*
* <p> This method may be invoked at any time. If another thread has
* already initiated a read operation upon this channel, however, then an
@@ -401,11 +409,6 @@
* closing the channel and setting the current thread's
* interrupt status
*
- * @throws SecurityException
- * If a security manager has been installed
- * and it does not permit datagrams to be accepted
- * from the datagram's sender
- *
* @throws IOException
* If some other I/O error occurs
*/
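A short usage sketch of the behavior documented above: connecting the channel performs the security checks once, after which send/receive exchange datagrams only with the connected peer and skip per-packet checks (the address below is an example from the TEST-NET range):

    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;
    import java.nio.channels.DatagramChannel;
    import java.nio.charset.StandardCharsets;

    // Usage sketch: connect once, then write without per-packet security checks.
    public class ConnectedDatagramSketch {
        public static void main(String[] args) throws Exception {
            InetSocketAddress peer = new InetSocketAddress("192.0.2.1", 9999); // example address
            try (DatagramChannel ch = DatagramChannel.open()) {
                ch.connect(peer);   // checkConnect/checkAccept performed here, once
                ch.write(ByteBuffer.wrap("ping".getBytes(StandardCharsets.US_ASCII)));
                ch.disconnect();    // per the apiNote above, close the channel if this throws
            }
        }
    }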
--- a/src/java.base/share/classes/java/nio/channels/SelectionKey.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/nio/channels/SelectionKey.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,8 @@
package java.nio.channels;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
/**
* A token representing the registration of a {@link SelectableChannel} with a
@@ -428,13 +429,17 @@
// -- Attachments --
+ private static final VarHandle ATTACHMENT;
+ static {
+ try {
+ MethodHandles.Lookup l = MethodHandles.lookup();
+ ATTACHMENT = l.findVarHandle(SelectionKey.class, "attachment", Object.class);
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ }
private volatile Object attachment;
- private static final AtomicReferenceFieldUpdater<SelectionKey,Object>
- attachmentUpdater = AtomicReferenceFieldUpdater.newUpdater(
- SelectionKey.class, Object.class, "attachment"
- );
-
/**
* Attaches the given object to this key.
*
@@ -450,7 +455,7 @@
* otherwise {@code null}
*/
public final Object attach(Object ob) {
- return attachmentUpdater.getAndSet(this, ob);
+ return ATTACHMENT.getAndSet(this, ob);
}
/**
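
The patch replaces AtomicReferenceFieldUpdater with a VarHandle obtained via MethodHandles.lookup(); attach() still performs a single atomic getAndSet on the volatile attachment field. A self-contained sketch of the same pattern outside the JDK class, with illustrative names:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class AttachmentHolder {
        private static final VarHandle VALUE;
        static {
            try {
                VALUE = MethodHandles.lookup()
                        .findVarHandle(AttachmentHolder.class, "value", Object.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private volatile Object value;

        Object swap(Object newValue) {
            // atomically publish newValue and return the previous value
            return VALUE.getAndSet(this, newValue);
        }
    }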
--- a/src/java.base/share/classes/java/nio/channels/spi/AbstractSelectionKey.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/nio/channels/spi/AbstractSelectionKey.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,13 @@
package java.nio.channels.spi;
-import java.nio.channels.*;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import sun.nio.ch.SelectionKeyImpl;
+import sun.nio.ch.SelectorImpl;
/**
* Base implementation class for selection keys.
@@ -41,20 +46,29 @@
public abstract class AbstractSelectionKey
extends SelectionKey
{
+ private static final VarHandle INVALID;
+ static {
+ try {
+ MethodHandles.Lookup l = MethodHandles.lookup();
+ INVALID = l.findVarHandle(AbstractSelectionKey.class, "invalid", boolean.class);
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ }
/**
* Initializes a new instance of this class.
*/
protected AbstractSelectionKey() { }
- private volatile boolean valid = true;
+ private volatile boolean invalid;
public final boolean isValid() {
- return valid;
+ return !invalid;
}
void invalidate() { // package-private
- valid = false;
+ invalid = true;
}
/**
@@ -64,13 +78,14 @@
* selector's cancelled-key set while synchronized on that set. </p>
*/
public final void cancel() {
- // Synchronizing "this" to prevent this key from getting canceled
- // multiple times by different threads, which might cause race
- // condition between selector's select() and channel's close().
- synchronized (this) {
- if (valid) {
- valid = false;
- ((AbstractSelector)selector()).cancel(this);
+ boolean changed = (boolean) INVALID.compareAndSet(this, false, true);
+ if (changed) {
+ Selector sel = selector();
+ if (sel instanceof SelectorImpl) {
+ // queue cancelled key directly
+ ((SelectorImpl) sel).cancel((SelectionKeyImpl) this);
+ } else {
+ ((AbstractSelector) sel).cancel(this);
}
}
}

--- a/src/java.base/share/classes/java/nio/channels/spi/AbstractSelector.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/nio/channels/spi/AbstractSelector.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,14 @@
package java.nio.channels.spi;
import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.HashSet;
import java.util.Set;
import sun.nio.ch.Interruptible;
-import java.util.concurrent.atomic.AtomicBoolean;
+import sun.nio.ch.SelectorImpl;
/**
@@ -69,12 +71,23 @@
public abstract class AbstractSelector
extends Selector
{
-
- private final AtomicBoolean selectorOpen = new AtomicBoolean(true);
+ private static final VarHandle CLOSED;
+ static {
+ try {
+ MethodHandles.Lookup l = MethodHandles.lookup();
+ CLOSED = l.findVarHandle(AbstractSelector.class, "closed", boolean.class);
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ }
+ private volatile boolean closed;
// The provider that created this selector
private final SelectorProvider provider;
+ // cancelled-key set, not used by the JDK Selector implementations
+ private final Set<SelectionKey> cancelledKeys;
+
/**
* Initializes a new instance of this class.
*
@@ -83,10 +96,14 @@
*/
protected AbstractSelector(SelectorProvider provider) {
this.provider = provider;
+ if (this instanceof SelectorImpl) {
+ // not used in JDK Selector implementations
+ this.cancelledKeys = Set.of();
+ } else {
+ this.cancelledKeys = new HashSet<>();
+ }
}
- private final Set<SelectionKey> cancelledKeys = new HashSet<SelectionKey>();
-
void cancel(SelectionKey k) { // package-private
synchronized (cancelledKeys) {
cancelledKeys.add(k);
@@ -105,10 +122,10 @@
* If an I/O error occurs
*/
public final void close() throws IOException {
- boolean open = selectorOpen.getAndSet(false);
- if (!open)
- return;
- implCloseSelector();
+ boolean changed = (boolean) CLOSED.compareAndSet(this, false, true);
+ if (changed) {
+ implCloseSelector();
+ }
}
/**
@@ -130,7 +147,7 @@
protected abstract void implCloseSelector() throws IOException;
public final boolean isOpen() {
- return selectorOpen.get();
+ return !closed;
}
/**
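
Both AbstractSelectionKey.cancel() and AbstractSelector.close() above now flip a volatile boolean with compareAndSet, so the teardown path runs exactly once without a synchronized block. A minimal sketch of that close-exactly-once idiom; class and method names are illustrative:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class CloseOnce {
        private static final VarHandle CLOSED;
        static {
            try {
                CLOSED = MethodHandles.lookup()
                        .findVarHandle(CloseOnce.class, "closed", boolean.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private volatile boolean closed;

        void close() {
            // only the first caller wins the CAS and performs the cleanup
            if (CLOSED.compareAndSet(this, false, true)) {
                releaseResources();
            }
        }

        boolean isOpen() {
            return !closed;
        }

        private void releaseResources() {
            // hypothetical cleanup work
        }
    }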
--- a/src/java.base/share/classes/java/nio/file/Files.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/nio/file/Files.java Fri Oct 11 12:08:01 2019 +0530
@@ -3550,8 +3550,8 @@
// ensure lines is not null before opening file
Objects.requireNonNull(lines);
CharsetEncoder encoder = cs.newEncoder();
- OutputStream out = newOutputStream(path, options);
- try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out, encoder))) {
+ try (OutputStream out = newOutputStream(path, options);
+ BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out, encoder))) {
for (CharSequence line: lines) {
writer.append(line);
writer.newLine();
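
The Files.write change puts the raw OutputStream into the same try-with-resources as the wrapping BufferedWriter, so the stream is closed even if constructing or using the writer throws. A simplified sketch of the resulting pattern, not the JDK implementation itself:

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class TwoResourceWrite {
        static void writeLines(Path path, Iterable<? extends CharSequence> lines) throws IOException {
            // both resources are closed in reverse order, even on early failure
            try (OutputStream out = Files.newOutputStream(path);
                 BufferedWriter writer = new BufferedWriter(
                         new OutputStreamWriter(out, StandardCharsets.UTF_8.newEncoder()))) {
                for (CharSequence line : lines) {
                    writer.append(line);
                    writer.newLine();
                }
            }
        }
    }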
--- a/src/java.base/share/classes/java/security/GuardedObject.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/security/GuardedObject.java Fri Oct 11 12:08:01 2019 +0530
@@ -52,7 +52,9 @@
@java.io.Serial
private static final long serialVersionUID = -5240450096227834308L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object object; // the object we are guarding
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Guard guard; // the guard
/**
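
This and the many similar @SuppressWarnings("serial") additions below mark non-transient fields of serializable classes whose declared type is not itself Serializable; the stored value is expected to be serializable at runtime, so the (anticipated, stricter) -Xlint:serial warning is suppressed rather than the field type changed. A minimal illustration of the situation, using hypothetical names:

    import java.io.Serializable;

    class Wrapper implements Serializable {
        private static final long serialVersionUID = 1L;

        // Declared as Object, so a strict serial lint cannot prove it is serializable,
        // even though callers are expected to pass only Serializable values.
        @SuppressWarnings("serial") // Not statically typed as Serializable
        private final Object payload;

        Wrapper(Object payload) {
            this.payload = payload;
        }
    }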
--- a/src/java.base/share/classes/java/security/Provider.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/security/Provider.java Fri Oct 11 12:08:01 2019 +0530
@@ -1255,9 +1255,9 @@
}
synchronized (this) {
ensureLegacyParsed();
- }
- if (legacyMap != null && !legacyMap.isEmpty()) {
- return legacyMap.get(key);
+ if (legacyMap != null && !legacyMap.isEmpty()) {
+ return legacyMap.get(key);
+ }
}
return null;
}
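
The Provider.getProperty fix moves the legacyMap read inside the synchronized block that (re)builds it, so a caller cannot observe the map between invalidation and re-parsing. A small sketch of that lock discipline, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    class LazyLookup {
        private Map<String, String> cache;   // guarded by "this"

        String get(String key) {
            synchronized (this) {
                if (cache == null) {
                    cache = parse();         // rebuild under the lock
                }
                return cache.get(key);       // read under the same lock
            }
        }

        private Map<String, String> parse() {
            Map<String, String> m = new HashMap<>();
            m.put("example", "value");       // hypothetical entries
            return m;
        }
    }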
--- a/src/java.base/share/classes/java/security/SecureRandom.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/security/SecureRandom.java Fri Oct 11 12:08:01 2019 +0530
@@ -1043,6 +1043,7 @@
/**
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private MessageDigest digest = null;
/**
* @serial
--- a/src/java.base/share/classes/java/time/Clock.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/Clock.java Fri Oct 11 12:08:01 2019 +0530
@@ -641,6 +641,7 @@
static final class OffsetClock extends Clock implements Serializable {
@java.io.Serial
private static final long serialVersionUID = 2007484719125426256L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Clock baseClock;
private final Duration offset;
@@ -692,6 +693,7 @@
static final class TickClock extends Clock implements Serializable {
@java.io.Serial
private static final long serialVersionUID = 6504659149906368850L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Clock baseClock;
private final long tickNanos;
--- a/src/java.base/share/classes/java/time/Ser.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/Ser.java Fri Oct 11 12:08:01 2019 +0530
@@ -61,6 +61,7 @@
import java.io.InvalidClassException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+import java.io.Serializable;
import java.io.StreamCorruptedException;
/**
@@ -112,7 +113,7 @@
/** The type being serialized. */
private byte type;
/** The object being serialized. */
- private Object object;
+ private Serializable object;
/**
* Constructor for deserialization.
@@ -126,7 +127,7 @@
* @param type the type
* @param object the object
*/
- Ser(byte type, Object object) {
+ Ser(byte type, Serializable object) {
this.type = type;
this.object = object;
}
@@ -224,20 +225,35 @@
* {@code Ser} object.
*
* <ul>
- * <li><a href="{@docRoot}/serialized-form.html#java.time.Duration">Duration</a> - {@code Duration.ofSeconds(seconds, nanos);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.Instant">Instant</a> - {@code Instant.ofEpochSecond(seconds, nanos);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalDate">LocalDate</a> - {@code LocalDate.of(year, month, day);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalDateTime">LocalDateTime</a> - {@code LocalDateTime.of(date, time);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalTime">LocalTime</a> - {@code LocalTime.of(hour, minute, second, nano);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.MonthDay">MonthDay</a> - {@code MonthDay.of(month, day);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.OffsetTime">OffsetTime</a> - {@code OffsetTime.of(time, offset);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.OffsetDateTime">OffsetDateTime</a> - {@code OffsetDateTime.of(dateTime, offset);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.Period">Period</a> - {@code Period.of(years, months, days);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.Year">Year</a> - {@code Year.of(year);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.YearMonth">YearMonth</a> - {@code YearMonth.of(year, month);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.ZonedDateTime">ZonedDateTime</a> - {@code ZonedDateTime.ofLenient(dateTime, offset, zone);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.ZoneId">ZoneId</a> - {@code ZoneId.of(id);}
- * <li><a href="{@docRoot}/serialized-form.html#java.time.ZoneOffset">ZoneOffset</a> - {@code (offsetByte == 127 ? ZoneOffset.ofTotalSeconds(in.readInt()) : ZoneOffset.ofTotalSeconds(offsetByte * 900));}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.Duration">Duration</a> -
+ * {@code Duration.ofSeconds(seconds, nanos);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.Instant">Instant</a> -
+ * {@code Instant.ofEpochSecond(seconds, nanos);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalDate">LocalDate</a> -
+ * {@code LocalDate.of(year, month, day);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalDateTime">LocalDateTime</a> -
+ * {@code LocalDateTime.of(date, time);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.LocalTime">LocalTime</a> -
+ * {@code LocalTime.of(hour, minute, second, nano);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.MonthDay">MonthDay</a> -
+ * {@code MonthDay.of(month, day);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.OffsetTime">OffsetTime</a> -
+ * {@code OffsetTime.of(time, offset);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.OffsetDateTime">OffsetDateTime</a> -
+ * {@code OffsetDateTime.of(dateTime, offset);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.Period">Period</a> -
+ * {@code Period.of(years, months, days);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.Year">Year</a> -
+ * {@code Year.of(year);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.YearMonth">YearMonth</a> -
+ * {@code YearMonth.of(year, month);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.ZonedDateTime">ZonedDateTime</a> -
+ * {@code ZonedDateTime.ofLenient(dateTime, offset, zone);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.ZoneId">ZoneId</a> -
+ * {@code ZoneId.of(id);}
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.ZoneOffset">ZoneOffset</a> -
+ * {@code (offsetByte == 127 ? ZoneOffset.ofTotalSeconds(in.readInt()) :
+ * ZoneOffset.ofTotalSeconds(offsetByte * 900));}
* </ul>
*
* @param in the data to read, not null
@@ -247,12 +263,13 @@
object = readInternal(type, in);
}
- static Object read(ObjectInput in) throws IOException, ClassNotFoundException {
+ static Serializable read(ObjectInput in) throws IOException, ClassNotFoundException {
byte type = in.readByte();
return readInternal(type, in);
}
- private static Object readInternal(byte type, ObjectInput in) throws IOException, ClassNotFoundException {
+ private static Serializable readInternal(byte type, ObjectInput in)
+ throws IOException, ClassNotFoundException {
switch (type) {
case DURATION_TYPE: return Duration.readExternal(in);
case INSTANT_TYPE: return Instant.readExternal(in);
--- a/src/java.base/share/classes/java/time/chrono/AbstractChronology.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/chrono/AbstractChronology.java Fri Oct 11 12:08:01 2019 +0530
@@ -731,7 +731,7 @@
*/
@java.io.Serial
Object writeReplace() {
- return new Ser(Ser.CHRONO_TYPE, this);
+ return new Ser(Ser.CHRONO_TYPE, (Serializable)this);
}
/**
--- a/src/java.base/share/classes/java/time/chrono/ChronoPeriodImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/chrono/ChronoPeriodImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -109,6 +109,7 @@
/**
* The chronology.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Chronology chrono;
/**
* The number of years.
--- a/src/java.base/share/classes/java/time/chrono/Ser.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/chrono/Ser.java Fri Oct 11 12:08:01 2019 +0530
@@ -61,6 +61,7 @@
import java.io.InvalidClassException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+import java.io.Serializable;
import java.io.StreamCorruptedException;
import java.time.LocalDate;
import java.time.LocalDateTime;
@@ -110,7 +111,7 @@
/** The type being serialized. */
private byte type;
/** The object being serialized. */
- private Object object;
+ private Serializable object;
/**
* Constructor for deserialization.
@@ -124,7 +125,7 @@
* @param type the type
* @param object the object
*/
- Ser(byte type, Object object) {
+ Ser(byte type, Serializable object) {
this.type = type;
this.object = object;
}
@@ -203,18 +204,30 @@
* {@code Ser} object.
*
* <ul>
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.HijrahChronology">HijrahChronology</a> - Chronology.of(id)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.IsoChronology">IsoChronology</a> - Chronology.of(id)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseChronology">JapaneseChronology</a> - Chronology.of(id)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.MinguoChronology">MinguoChronology</a> - Chronology.of(id)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ThaiBuddhistChronology">ThaiBuddhistChronology</a> - Chronology.of(id)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ChronoLocalDateTimeImpl">ChronoLocalDateTime</a> - date.atTime(time)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ChronoZonedDateTimeImpl">ChronoZonedDateTime</a> - dateTime.atZone(offset).withZoneSameLocal(zone)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseDate">JapaneseDate</a> - JapaneseChronology.INSTANCE.date(year, month, dayOfMonth)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseEra">JapaneseEra</a> - JapaneseEra.of(eraValue)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.HijrahDate">HijrahDate</a> - HijrahChronology chrono.date(year, month, dayOfMonth)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.MinguoDate">MinguoDate</a> - MinguoChronology.INSTANCE.date(year, month, dayOfMonth)
- * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ThaiBuddhistDate">ThaiBuddhistDate</a> - ThaiBuddhistChronology.INSTANCE.date(year, month, dayOfMonth)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.HijrahChronology">HijrahChronology</a> -
+ * Chronology.of(id)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.IsoChronology">IsoChronology</a> -
+ * Chronology.of(id)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseChronology">JapaneseChronology</a> -
+ * Chronology.of(id)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.MinguoChronology">MinguoChronology</a> -
+ * Chronology.of(id)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ThaiBuddhistChronology">ThaiBuddhistChronology</a> -
+ * Chronology.of(id)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ChronoLocalDateTimeImpl">ChronoLocalDateTime</a> -
+ * date.atTime(time)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ChronoZonedDateTimeImpl">ChronoZonedDateTime</a> -
+ * dateTime.atZone(offset).withZoneSameLocal(zone)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseDate">JapaneseDate</a> -
+ * JapaneseChronology.INSTANCE.date(year, month, dayOfMonth)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.JapaneseEra">JapaneseEra</a> -
+ * JapaneseEra.of(eraValue)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.HijrahDate">HijrahDate</a> -
+ * HijrahChronology chrono.date(year, month, dayOfMonth)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.MinguoDate">MinguoDate</a> -
+ * MinguoChronology.INSTANCE.date(year, month, dayOfMonth)
+ * <li><a href="{@docRoot}/serialized-form.html#java.time.chrono.ThaiBuddhistDate">ThaiBuddhistDate</a> -
+ * ThaiBuddhistChronology.INSTANCE.date(year, month, dayOfMonth)
* </ul>
*
* @param in the data stream to read from, not null
@@ -225,16 +238,17 @@
object = readInternal(type, in);
}
- static Object read(ObjectInput in) throws IOException, ClassNotFoundException {
+ static Serializable read(ObjectInput in) throws IOException, ClassNotFoundException {
byte type = in.readByte();
return readInternal(type, in);
}
- private static Object readInternal(byte type, ObjectInput in) throws IOException, ClassNotFoundException {
+ private static Serializable readInternal(byte type, ObjectInput in)
+ throws IOException, ClassNotFoundException {
switch (type) {
- case CHRONO_TYPE: return AbstractChronology.readExternal(in);
- case CHRONO_LOCAL_DATE_TIME_TYPE: return ChronoLocalDateTimeImpl.readExternal(in);
- case CHRONO_ZONE_DATE_TIME_TYPE: return ChronoZonedDateTimeImpl.readExternal(in);
+ case CHRONO_TYPE: return (Serializable)AbstractChronology.readExternal(in);
+ case CHRONO_LOCAL_DATE_TIME_TYPE: return (Serializable)ChronoLocalDateTimeImpl.readExternal(in);
+ case CHRONO_ZONE_DATE_TIME_TYPE: return (Serializable)ChronoZonedDateTimeImpl.readExternal(in);
case JAPANESE_DATE_TYPE: return JapaneseDate.readExternal(in);
case JAPANESE_ERA_TYPE: return JapaneseEra.readExternal(in);
case HIJRAH_DATE_TYPE: return HijrahDate.readExternal(in);
--- a/src/java.base/share/classes/java/time/zone/Ser.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/time/zone/Ser.java Fri Oct 11 12:08:01 2019 +0530
@@ -68,6 +68,7 @@
import java.io.InvalidClassException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+import java.io.Serializable;
import java.io.StreamCorruptedException;
import java.time.ZoneOffset;
@@ -97,7 +98,7 @@
/** The type being serialized. */
private byte type;
/** The object being serialized. */
- private Object object;
+ private Serializable object;
/**
* Constructor for deserialization.
@@ -111,7 +112,7 @@
* @param type the type
* @param object the object
*/
- Ser(byte type, Object object) {
+ Ser(byte type, Serializable object) {
this.type = type;
this.object = object;
}
@@ -183,12 +184,13 @@
object = readInternal(type, in);
}
- static Object read(DataInput in) throws IOException, ClassNotFoundException {
+ static Serializable read(DataInput in) throws IOException, ClassNotFoundException {
byte type = in.readByte();
return readInternal(type, in);
}
- private static Object readInternal(byte type, DataInput in) throws IOException, ClassNotFoundException {
+ private static Serializable readInternal(byte type, DataInput in)
+ throws IOException, ClassNotFoundException {
switch (type) {
case ZRULES:
return ZoneRules.readExternal(in);
--- a/src/java.base/share/classes/java/util/AbstractMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/AbstractMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -607,7 +607,9 @@
@java.io.Serial
private static final long serialVersionUID = -8499721149061103585L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final K key;
+ @SuppressWarnings("serial") // Conditionally serializable
private V value;
/**
@@ -738,7 +740,9 @@
@java.io.Serial
private static final long serialVersionUID = 7138329143949025153L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final K key;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final V value;
/**
--- a/src/java.base/share/classes/java/util/ArrayPrefixHelpers.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/ArrayPrefixHelpers.java Fri Oct 11 12:08:01 2019 +0530
@@ -103,10 +103,15 @@
static final int MIN_PARTITION = 16;
static final class CumulateTask<T> extends CountedCompleter<Void> {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final T[] array;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final BinaryOperator<T> function;
CumulateTask<T> left, right;
- T in, out;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ T in;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ T out;
final int lo, hi, origin, fence, threshold;
/** Root task constructor */
@@ -257,6 +262,7 @@
static final class LongCumulateTask extends CountedCompleter<Void> {
final long[] array;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final LongBinaryOperator function;
LongCumulateTask left, right;
long in, out;
@@ -408,6 +414,7 @@
static final class DoubleCumulateTask extends CountedCompleter<Void> {
final double[] array;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final DoubleBinaryOperator function;
DoubleCumulateTask left, right;
double in, out;
@@ -559,6 +566,7 @@
static final class IntCumulateTask extends CountedCompleter<Void> {
final int[] array;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final IntBinaryOperator function;
IntCumulateTask left, right;
int in, out;
--- a/src/java.base/share/classes/java/util/Arrays.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/Arrays.java Fri Oct 11 12:08:01 2019 +0530
@@ -4339,6 +4339,7 @@
{
@java.io.Serial
private static final long serialVersionUID = -2764017481108945198L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final E[] a;
ArrayList(E[] array) {
--- a/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java Fri Oct 11 12:08:01 2019 +0530
@@ -115,8 +115,12 @@
static final class Sorter<T> extends CountedCompleter<Void> {
@java.io.Serial
static final long serialVersionUID = 2446542900576103244L;
- final T[] a, w;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ final T[] a;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ final T[] w;
final int base, size, wbase, gran;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
Comparator<? super T> comparator;
Sorter(CountedCompleter<?> par, T[] a, T[] w, int base, int size,
int wbase, int gran,
@@ -153,8 +157,13 @@
static final class Merger<T> extends CountedCompleter<Void> {
@java.io.Serial
static final long serialVersionUID = 2446542900576103244L;
- final T[] a, w; // main and workspace arrays
+ // main and workspace arrays
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ final T[] a;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ final T[] w;
final int lbase, lsize, rbase, rsize, wbase, gran;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
Comparator<? super T> comparator;
Merger(CountedCompleter<?> par, T[] a, T[] w,
int lbase, int lsize, int rbase,
--- a/src/java.base/share/classes/java/util/Collection.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/Collection.java Fri Oct 11 12:08:01 2019 +0530
@@ -188,6 +188,38 @@
* or if the only reference to the backing collection is through an
* unmodifiable view, the view can be considered effectively immutable.
*
+ * <h2><a id="serializable">Serializability of Collections</a></h2>
+ *
+ * <p>Serializability of collections is optional. As such, none of the collections
+ * interfaces are declared to implement the {@link java.io.Serializable} interface.
+ * However, serializability is regarded as being generally useful, so most collection
+ * implementations are serializable.
+ *
+ * <p>The collection implementations that are public classes (such as {@code ArrayList}
+ * or {@code HashMap}) are declared to implement the {@code Serializable} interface if they
+ * are in fact serializable. Some collection implementations are not public classes,
+ * such as the <a href="#unmodifiable">unmodifiable collections</a>. In such cases, the
+ * serializability of such collections is described in the specification of the method
+ * that creates them, or in some other suitable place. In cases where the serializability
+ * of a collection is not specified, there is no guarantee about the serializability of such
+ * collections. In particular, many <a href="#view">view collections</a> are not serializable.
+ *
+ * <p>A collection implementation that implements the {@code Serializable} interface cannot
+ * be guaranteed to be serializable. The reason is that in general, collections
+ * contain elements of other types, and it is not possible to determine statically
+ * whether instances of some element type are actually serializable. For example, consider
+ * a serializable {@code Collection<E>}, where {@code E} does not implement the
+ * {@code Serializable} interface. The collection may be serializable, if it contains only
+ * elements of some serializable subtype of {@code E}, or if it is empty. Collections are
+ * thus said to be <i>conditionally serializable,</i> as the serializability of the collection
+ * as a whole depends on whether the collection itself is serializable and on whether all
+ * contained elements are also serializable.
+ *
+ * <p>An additional case occurs with instances of {@link SortedSet} and {@link SortedMap}.
+ * These collections can be created with a {@link Comparator} that imposes an ordering on
+ * the set elements or map keys. Such a collection is serializable only if the provided
+ * {@code Comparator} is also serializable.
+ *
* <p>This interface is a member of the
* <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
* Java Collections Framework</a>.
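
The new javadoc section explains conditional serializability: a Serializable collection still fails to serialize if any contained element is not serializable. A runnable illustration; the Handle type is invented for the example:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.NotSerializableException;
    import java.io.ObjectOutputStream;
    import java.util.ArrayList;
    import java.util.List;

    public class ConditionallySerializable {
        // deliberately not Serializable
        static final class Handle {
            final String name;
            Handle(String name) { this.name = name; }
        }

        public static void main(String[] args) throws IOException {
            List<Object> list = new ArrayList<>();   // ArrayList implements Serializable
            list.add("ok");                          // String is serializable
            serialize(list);                         // succeeds: all elements serializable

            list.add(new Handle("nope"));
            try {
                serialize(list);                     // fails: one element is not serializable
            } catch (NotSerializableException expected) {
                System.out.println("conditionally serializable: " + expected);
            }
        }

        static void serialize(Object o) throws IOException {
            try (ObjectOutputStream oos = new ObjectOutputStream(new ByteArrayOutputStream())) {
                oos.writeObject(o);
            }
        }
    }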
--- a/src/java.base/share/classes/java/util/Collections.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/Collections.java Fri Oct 11 12:08:01 2019 +0530
@@ -1024,6 +1024,7 @@
@java.io.Serial
private static final long serialVersionUID = 1820017752578914078L;
+ @SuppressWarnings("serial") // Conditionally serializable
final Collection<? extends E> c;
UnmodifiableCollection(Collection<? extends E> c) {
@@ -1164,6 +1165,7 @@
implements SortedSet<E>, Serializable {
@java.io.Serial
private static final long serialVersionUID = -4929149591599911165L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedSet<E> ss;
UnmodifiableSortedSet(SortedSet<E> s) {super(s); ss = s;}
@@ -1244,6 +1246,7 @@
/**
* The instance we are protecting.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableSet<E> ns;
UnmodifiableNavigableSet(NavigableSet<E> s) {super(s); ns = s;}
@@ -1304,6 +1307,7 @@
@java.io.Serial
private static final long serialVersionUID = -283967356065247728L;
+ @SuppressWarnings("serial") // Conditionally serializable
final List<? extends E> list;
UnmodifiableList(List<? extends E> list) {
@@ -1450,6 +1454,7 @@
@java.io.Serial
private static final long serialVersionUID = -1034234728574286014L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final Map<? extends K, ? extends V> m;
UnmodifiableMap(Map<? extends K, ? extends V> m) {
@@ -1809,6 +1814,7 @@
@java.io.Serial
private static final long serialVersionUID = -8806743815996713206L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedMap<K, ? extends V> sm;
UnmodifiableSortedMap(SortedMap<K, ? extends V> m) {super(m); sm = m; }
@@ -1886,6 +1892,7 @@
/**
* The instance we wrap and protect.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableMap<K, ? extends V> nm;
UnmodifiableNavigableMap(NavigableMap<K, ? extends V> m)
@@ -2017,7 +2024,9 @@
@java.io.Serial
private static final long serialVersionUID = 3053995032091335093L;
+ @SuppressWarnings("serial") // Conditionally serializable
final Collection<E> c; // Backing Collection
+ @SuppressWarnings("serial") // Conditionally serializable
final Object mutex; // Object on which to synchronize
SynchronizedCollection(Collection<E> c) {
@@ -2219,6 +2228,7 @@
@java.io.Serial
private static final long serialVersionUID = 8695801310862127406L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedSet<E> ss;
SynchronizedSortedSet(SortedSet<E> s) {
@@ -2314,6 +2324,7 @@
@java.io.Serial
private static final long serialVersionUID = -5505529816273629798L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableSet<E> ns;
SynchronizedNavigableSet(NavigableSet<E> s) {
@@ -2424,6 +2435,7 @@
@java.io.Serial
private static final long serialVersionUID = -7754090372962971524L;
+ @SuppressWarnings("serial") // Conditionally serializable
final List<E> list;
SynchronizedList(List<E> list) {
@@ -2591,7 +2603,9 @@
@java.io.Serial
private static final long serialVersionUID = 1978198479659022715L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final Map<K,V> m; // Backing Map
+ @SuppressWarnings("serial") // Conditionally serializable
final Object mutex; // Object on which to synchronize
SynchronizedMap(Map<K,V> m) {
@@ -2788,6 +2802,7 @@
@java.io.Serial
private static final long serialVersionUID = -8798146769416483793L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedMap<K,V> sm;
SynchronizedSortedMap(SortedMap<K,V> m) {
@@ -2891,6 +2906,7 @@
@java.io.Serial
private static final long serialVersionUID = 699392247599746807L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableMap<K,V> nm;
SynchronizedNavigableMap(NavigableMap<K,V> m) {
@@ -3070,7 +3086,9 @@
@java.io.Serial
private static final long serialVersionUID = 1578914078182001775L;
+ @SuppressWarnings("serial") // Conditionally serializable
final Collection<E> c;
+ @SuppressWarnings("serial") // Conditionally serializable
final Class<E> type;
@SuppressWarnings("unchecked")
@@ -3126,6 +3144,7 @@
public boolean add(E e) { return c.add(typeCheck(e)); }
+ @SuppressWarnings("serial") // Conditionally serializable
private E[] zeroLengthElementArray; // Lazily initialized
private E[] zeroLengthElementArray() {
@@ -3219,6 +3238,7 @@
{
@java.io.Serial
private static final long serialVersionUID = 1433151992604707767L;
+ @SuppressWarnings("serial") // Conditionally serializable
final Queue<E> queue;
CheckedQueue(Queue<E> queue, Class<E> elementType) {
@@ -3323,6 +3343,7 @@
@java.io.Serial
private static final long serialVersionUID = 1599911165492914959L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedSet<E> ss;
CheckedSortedSet(SortedSet<E> s, Class<E> type) {
@@ -3387,6 +3408,7 @@
@java.io.Serial
private static final long serialVersionUID = -5429120189805438922L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableSet<E> ns;
CheckedNavigableSet(NavigableSet<E> s, Class<E> type) {
@@ -3470,6 +3492,7 @@
{
@java.io.Serial
private static final long serialVersionUID = 65247728283967356L;
+ @SuppressWarnings("serial") // Conditionally serializable
final List<E> list;
CheckedList(List<E> list, Class<E> type) {
@@ -3619,8 +3642,11 @@
@java.io.Serial
private static final long serialVersionUID = 5742860141034234728L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final Map<K, V> m;
+ @SuppressWarnings("serial") // Conditionally serializable
final Class<K> keyType;
+ @SuppressWarnings("serial") // Conditionally serializable
final Class<V> valueType;
private void typeCheck(Object key, Object value) {
@@ -4019,6 +4045,7 @@
@java.io.Serial
private static final long serialVersionUID = 1599671320688067438L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedMap<K, V> sm;
CheckedSortedMap(SortedMap<K, V> m,
@@ -4094,6 +4121,7 @@
@java.io.Serial
private static final long serialVersionUID = -4852462692372534096L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final NavigableMap<K, V> nm;
CheckedNavigableMap(NavigableMap<K, V> m,
@@ -4825,6 +4853,7 @@
@java.io.Serial
private static final long serialVersionUID = 3193687207550431679L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final E element;
SingletonSet(E e) {element = e;}
@@ -4879,6 +4908,7 @@
@java.io.Serial
private static final long serialVersionUID = 3093736618740652951L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final E element;
SingletonList(E obj) {element = obj;}
@@ -4948,7 +4978,9 @@
@java.io.Serial
private static final long serialVersionUID = -6979724477215052911L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final K k;
+ @SuppressWarnings("serial") // Conditionally serializable
private final V v;
SingletonMap(K key, V value) {
@@ -5087,6 +5119,7 @@
private static final long serialVersionUID = 2739099268398711800L;
final int n;
+ @SuppressWarnings("serial") // Conditionally serializable
final E element;
CopiesList(int n, E e) {
@@ -5320,6 +5353,7 @@
*
* @serial
*/
+ @SuppressWarnings("serial") // Conditionally serializable
final Comparator<T> cmp;
ReverseComparator2(Comparator<T> cmp) {
@@ -5601,6 +5635,7 @@
private static class SetFromMap<E> extends AbstractSet<E>
implements Set<E>, Serializable
{
+ @SuppressWarnings("serial") // Conditionally serializable
private final Map<E, Boolean> m; // The backing map
private transient Set<E> s; // Its keySet
@@ -5686,6 +5721,7 @@
implements Queue<E>, Serializable {
@java.io.Serial
private static final long serialVersionUID = 1802017725587941708L;
+ @SuppressWarnings("serial") // Conditionally serializable
private final Deque<E> q;
AsLIFOQueue(Deque<E> q) { this.q = q; }
public boolean add(E e) { q.addFirst(e); return true; }
--- a/src/java.base/share/classes/java/util/Comparators.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/Comparators.java Fri Oct 11 12:08:01 2019 +0530
@@ -66,6 +66,7 @@
private static final long serialVersionUID = -7569533591570686392L;
private final boolean nullFirst;
// if null, non-null Ts are considered equal
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Comparator<T> real;
@SuppressWarnings("unchecked")
--- a/src/java.base/share/classes/java/util/PriorityQueue.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/PriorityQueue.java Fri Oct 11 12:08:01 2019 +0530
@@ -111,6 +111,7 @@
* The comparator, or null if priority queue uses elements'
* natural ordering.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Comparator<? super E> comparator;
/**
--- a/src/java.base/share/classes/java/util/TreeMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/TreeMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -118,6 +118,7 @@
*
* @serial
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Comparator<? super K> comparator;
private transient Entry<K,V> root;
@@ -1353,7 +1354,10 @@
* if loInclusive is true, lo is the inclusive bound, else lo
* is the exclusive bound. Similarly for the upper bound.
*/
- final K lo, hi;
+ @SuppressWarnings("serial") // Conditionally serializable
+ final K lo;
+ @SuppressWarnings("serial") // Conditionally serializable
+ final K hi;
final boolean fromStart, toEnd;
final boolean loInclusive, hiInclusive;
@@ -1936,6 +1940,7 @@
super(m, fromStart, lo, loInclusive, toEnd, hi, hiInclusive);
}
+ @SuppressWarnings("serial") // Conditionally serializable
private final Comparator<? super K> reverseComparator =
Collections.reverseOrder(m.comparator);
@@ -2024,7 +2029,10 @@
@java.io.Serial
private static final long serialVersionUID = -6520786458950516097L;
private boolean fromStart = false, toEnd = false;
- private K fromKey, toKey;
+ @SuppressWarnings("serial") // Conditionally serializable
+ private K fromKey;
+ @SuppressWarnings("serial") // Conditionally serializable
+ private K toKey;
@java.io.Serial
private Object readResolve() {
return new AscendingSubMap<>(TreeMap.this,
--- a/src/java.base/share/classes/java/util/Vector.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/Vector.java Fri Oct 11 12:08:01 2019 +0530
@@ -102,6 +102,7 @@
*
* @serial
*/
+ @SuppressWarnings("serial") // Conditionally serializable
protected Object[] elementData;
/**
--- a/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedLongSynchronizer.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedLongSynchronizer.java Fri Oct 11 12:08:01 2019 +0530
@@ -130,7 +130,7 @@
}
public final boolean block() {
- while (!isReleasable()) LockSupport.park(this);
+ while (!isReleasable()) LockSupport.park();
return true;
}
}
--- a/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedSynchronizer.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/concurrent/locks/AbstractQueuedSynchronizer.java Fri Oct 11 12:08:01 2019 +0530
@@ -502,7 +502,7 @@
}
public final boolean block() {
- while (!isReleasable()) LockSupport.park(this);
+ while (!isReleasable()) LockSupport.park();
return true;
}
}
--- a/src/java.base/share/classes/java/util/jar/JarVerifier.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/jar/JarVerifier.java Fri Oct 11 12:08:01 2019 +0530
@@ -590,6 +590,7 @@
URL vlocation;
CodeSigner[] vsigners;
java.security.cert.Certificate[] vcerts;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
Object csdomain;
VerifierCodeSource(Object csdomain, URL location, CodeSigner[] signers) {
--- a/src/java.base/share/classes/java/util/stream/Collector.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/java/util/stream/Collector.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,10 @@
import java.util.function.Function;
import java.util.function.Supplier;
+// A compilation test for the code snippets in this class-level javadoc can be found at:
+// test/jdk/java/util/stream/test/org/openjdk/tests/java/util/stream/CollectorExample.java
+// The test needs to be updated if the examples in this javadoc change or new examples are added.
+
/**
* A <a href="package-summary.html#Reduction">mutable reduction operation</a> that
* accumulates input elements into a mutable result container, optionally transforming
@@ -154,7 +158,7 @@
* Performing a reduction operation with a {@code Collector} should produce a
* result equivalent to:
* <pre>{@code
- * R container = collector.supplier().get();
+ * A container = collector.supplier().get();
* for (T t : data)
* collector.accumulator().accept(container, t);
* return collector.finisher().apply(container);
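
The javadoc fix above corrects the container's type from R to A: the mutable container has the intermediate accumulation type, and only the finisher produces an R. A compilable sketch of that corrected reduction loop, applied to a joining collector built with Collector.of:

    import java.util.List;
    import java.util.StringJoiner;
    import java.util.stream.Collector;

    public class ManualCollect {
        // generic form of the corrected snippet: container is of type A, result of type R
        static <T, A, R> R collect(Iterable<? extends T> data, Collector<T, A, R> collector) {
            A container = collector.supplier().get();
            for (T t : data) {
                collector.accumulator().accept(container, t);
            }
            return collector.finisher().apply(container);
        }

        public static void main(String[] args) {
            Collector<CharSequence, StringJoiner, String> joining =
                    Collector.of(() -> new StringJoiner(", "),
                                 StringJoiner::add,
                                 StringJoiner::merge,
                                 StringJoiner::toString);
            System.out.println(collect(List.of("a", "b", "c"), joining)); // a, b, c
        }
    }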
--- a/src/java.base/share/classes/javax/crypto/CryptoPermission.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/javax/crypto/CryptoPermission.java Fri Oct 11 12:08:01 2019 +0530
@@ -55,6 +55,7 @@
private String alg;
private int maxKeySize = Integer.MAX_VALUE; // no restriction on maxKeySize
private String exemptionMechanism = null;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private AlgorithmParameterSpec algParamSpec = null;
private boolean checkParam = false; // no restriction on param
--- a/src/java.base/share/classes/javax/security/auth/PrivateCredentialPermission.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/javax/security/auth/PrivateCredentialPermission.java Fri Oct 11 12:08:01 2019 +0530
@@ -119,6 +119,7 @@
* The set contains elements of type,
* {@code PrivateCredentialPermission.CredOwner}.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Set<Principal> principals; // ignored - kept around for compatibility
private transient CredOwner[] credOwners;
--- a/src/java.base/share/classes/javax/security/auth/Subject.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/javax/security/auth/Subject.java Fri Oct 11 12:08:01 2019 +0530
@@ -111,6 +111,7 @@
* {@code java.security.Principal}.
* The set is a {@code Subject.SecureSet}.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
Set<Principal> principals;
/**
--- a/src/java.base/share/classes/javax/security/auth/callback/UnsupportedCallbackException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/javax/security/auth/callback/UnsupportedCallbackException.java Fri Oct 11 12:08:01 2019 +0530
@@ -39,6 +39,7 @@
/**
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Callback callback;
/**
--- a/src/java.base/share/classes/sun/net/www/http/HttpClient.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/net/www/http/HttpClient.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1994, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -707,11 +707,7 @@
} else {
// try once more
openServer();
- if (needsTunneling()) {
- MessageHeader origRequests = requests;
- httpuc.doTunneling();
- requests = origRequests;
- }
+ checkTunneling(httpuc);
afterConnect();
writeRequests(requests, poster);
return parseHTTP(responses, pi, httpuc);
@@ -722,6 +718,18 @@
}
+ // Check whether tunnel must be open and open it if necessary
+ // (in the case of HTTPS with proxy)
+ private void checkTunneling(HttpURLConnection httpuc) throws IOException {
+ if (needsTunneling()) {
+ MessageHeader origRequests = requests;
+ PosterOutputStream origPoster = poster;
+ httpuc.doTunneling();
+ requests = origRequests;
+ poster = origPoster;
+ }
+ }
+
private boolean parseHTTPHeader(MessageHeader responses, ProgressSource pi, HttpURLConnection httpuc)
throws IOException {
/* If "HTTP/*" is found in the beginning, return true. Let
@@ -849,11 +857,7 @@
closeServer();
cachedHttpClient = false;
openServer();
- if (needsTunneling()) {
- MessageHeader origRequests = requests;
- httpuc.doTunneling();
- requests = origRequests;
- }
+ checkTunneling(httpuc);
afterConnect();
writeRequests(requests, poster);
return parseHTTP(responses, pi, httpuc);
--- a/src/java.base/share/classes/sun/net/www/protocol/http/NegotiateAuthentication.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/net/www/protocol/http/NegotiateAuthentication.java Fri Oct 11 12:08:01 2019 +0530
@@ -49,6 +49,7 @@
private static final long serialVersionUID = 100L;
private static final PlatformLogger logger = HttpURLConnection.getHttpLogger();
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final HttpCallerInfo hci;
// These maps are used to manage the GSS availability for different
@@ -67,6 +68,7 @@
}
// The HTTP Negotiate Helper
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Negotiator negotiator = null;
/**
--- a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -875,6 +875,11 @@
if (state == ST_CONNECTED)
throw new AlreadyConnectedException();
+ // ensure that the socket is bound
+ if (localAddress == null) {
+ bindInternal(null);
+ }
+
int n = Net.connect(family,
fd,
isa.getAddress(),
@@ -932,8 +937,21 @@
remoteAddress = null;
state = ST_UNCONNECTED;
- // refresh local address
- localAddress = Net.localAddress(fd);
+ // check whether rebind is needed
+ InetSocketAddress isa = Net.localAddress(fd);
+ if (isa.getPort() == 0) {
+ // On Linux, if bound to ephemeral port,
+ // disconnect does not preserve that port.
+ // In this case, try to rebind to the previous port.
+ int port = localAddress.getPort();
+ localAddress = isa; // in case Net.bind fails
+ Net.bind(family, fd, isa.getAddress(), port);
+ isa = Net.localAddress(fd); // refresh address
+ assert isa.getPort() == port;
+ }
+
+ // refresh localAddress
+ localAddress = isa;
}
} finally {
writeLock.unlock();
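
The disconnect() change rebinds the socket when the kernel (observed on Linux) drops an ephemeral local port on disconnect, so the channel keeps the port it was bound to. A small probe of the behavior the fix is meant to guarantee; the peer address is hypothetical and no datagrams are sent:

    import java.net.InetSocketAddress;
    import java.nio.channels.DatagramChannel;

    public class PortPreservedAfterDisconnect {
        public static void main(String[] args) throws Exception {
            try (DatagramChannel ch = DatagramChannel.open()) {
                ch.bind(new InetSocketAddress(0));                    // ephemeral local port
                int before = ((InetSocketAddress) ch.getLocalAddress()).getPort();

                ch.connect(new InetSocketAddress("127.0.0.1", 4444)); // hypothetical peer
                ch.disconnect();

                int after = ((InetSocketAddress) ch.getLocalAddress()).getPort();
                System.out.println(before + " == " + after);          // expected to be equal
            }
        }
    }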
--- a/src/java.base/share/classes/sun/nio/ch/SelectorImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/nio/ch/SelectorImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,9 @@
import java.nio.channels.spi.AbstractSelectableChannel;
import java.nio.channels.spi.AbstractSelector;
import java.nio.channels.spi.SelectorProvider;
+import java.util.ArrayDeque;
import java.util.Collections;
+import java.util.Deque;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Objects;
@@ -46,7 +48,7 @@
* Base Selector implementation class.
*/
-abstract class SelectorImpl
+public abstract class SelectorImpl
extends AbstractSelector
{
// The set of keys registered with this Selector
@@ -59,6 +61,9 @@
private final Set<SelectionKey> publicKeys; // Immutable
private final Set<SelectionKey> publicSelectedKeys; // Removal allowed, but not addition
+ // pending cancelled keys for deregistration
+ private final Deque<SelectionKeyImpl> cancelledKeys = new ArrayDeque<>();
+
// used to check for reentrancy
private boolean inSelect;
@@ -203,7 +208,8 @@
if (!(ch instanceof SelChImpl))
throw new IllegalSelectorException();
SelectionKeyImpl k = new SelectionKeyImpl((SelChImpl)ch, this);
- k.attach(attachment);
+ if (attachment != null)
+ k.attach(attachment);
// register (if needed) before adding to key set
implRegister(k);
@@ -239,33 +245,36 @@
protected abstract void implDereg(SelectionKeyImpl ski) throws IOException;
/**
- * Invoked by selection operations to process the cancelled-key set
+ * Queue a cancelled key for the next selection operation
+ */
+ public void cancel(SelectionKeyImpl ski) {
+ synchronized (cancelledKeys) {
+ cancelledKeys.addLast(ski);
+ }
+ }
+
+ /**
+ * Invoked by selection operations to process the cancelled keys
*/
protected final void processDeregisterQueue() throws IOException {
assert Thread.holdsLock(this);
assert Thread.holdsLock(publicSelectedKeys);
- Set<SelectionKey> cks = cancelledKeys();
- synchronized (cks) {
- if (!cks.isEmpty()) {
- Iterator<SelectionKey> i = cks.iterator();
- while (i.hasNext()) {
- SelectionKeyImpl ski = (SelectionKeyImpl)i.next();
- i.remove();
-
- // remove the key from the selector
- implDereg(ski);
+ synchronized (cancelledKeys) {
+ SelectionKeyImpl ski;
+ while ((ski = cancelledKeys.pollFirst()) != null) {
+ // remove the key from the selector
+ implDereg(ski);
- selectedKeys.remove(ski);
- keys.remove(ski);
+ selectedKeys.remove(ski);
+ keys.remove(ski);
- // remove from channel's key set
- deregister(ski);
+ // remove from channel's key set
+ deregister(ski);
- SelectableChannel ch = ski.channel();
- if (!ch.isOpen() && !ch.isRegistered())
- ((SelChImpl)ch).kill();
- }
+ SelectableChannel ch = ski.channel();
+ if (!ch.isOpen() && !ch.isRegistered())
+ ((SelChImpl)ch).kill();
}
}
}
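
SelectorImpl now keeps cancelled keys in its own ArrayDeque and drains it in processDeregisterQueue(), instead of iterating the inherited cancelled-key set. A minimal sketch of that enqueue-then-drain pattern under a single monitor, with illustrative names:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.function.Consumer;

    class CancelQueue<T> {
        private final Deque<T> pending = new ArrayDeque<>();

        void enqueue(T item) {
            synchronized (pending) {
                pending.addLast(item);
            }
        }

        void drain(Consumer<T> action) {
            synchronized (pending) {
                T item;
                while ((item = pending.pollFirst()) != null) {
                    action.accept(item);   // process each cancelled item exactly once
                }
            }
        }
    }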
--- a/src/java.base/share/classes/sun/reflect/annotation/AnnotationInvocationHandler.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/reflect/annotation/AnnotationInvocationHandler.java Fri Oct 11 12:08:01 2019 +0530
@@ -44,6 +44,7 @@
@java.io.Serial
private static final long serialVersionUID = 6182022883658399397L;
private final Class<? extends Annotation> type;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Map<String, Object> memberValues;
AnnotationInvocationHandler(Class<? extends Annotation> type, Map<String, Object> memberValues) {
--- a/src/java.base/share/classes/sun/reflect/annotation/AnnotationTypeMismatchExceptionProxy.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/reflect/annotation/AnnotationTypeMismatchExceptionProxy.java Fri Oct 11 12:08:01 2019 +0530
@@ -36,7 +36,8 @@
class AnnotationTypeMismatchExceptionProxy extends ExceptionProxy {
@java.io.Serial
private static final long serialVersionUID = 7844069490309503934L;
- private Method member;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ private Method member; // Would be more robust to null-out in a writeObject method.
private final String foundType;
/**
--- a/src/java.base/share/classes/sun/security/internal/spec/TlsKeyMaterialSpec.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/internal/spec/TlsKeyMaterialSpec.java Fri Oct 11 12:08:01 2019 +0530
@@ -50,7 +50,11 @@
private final SecretKey clientMacKey, serverMacKey;
private final SecretKey clientCipherKey, serverCipherKey;
- private final IvParameterSpec clientIv, serverIv;
+
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ private final IvParameterSpec clientIv;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
+ private final IvParameterSpec serverIv;
/**
* Constructs a new TlsKeymaterialSpec from the client and server MAC
--- a/src/java.base/share/classes/sun/security/provider/PolicyParser.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/provider/PolicyParser.java Fri Oct 11 12:08:01 2019 +0530
@@ -1315,7 +1315,9 @@
private static final long serialVersionUID = -4330692689482574072L;
private String i18nMessage;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private LocalizedMessage localizedMsg;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object[] source;
/**
--- a/src/java.base/share/classes/sun/security/provider/SubjectCodeSource.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/provider/SubjectCodeSource.java Fri Oct 11 12:08:01 2019 +0530
@@ -54,6 +54,7 @@
private static final Class<?>[] PARAMS = { String.class };
private static final sun.security.util.Debug debug =
sun.security.util.Debug.getInstance("auth", "\t[Auth Access]");
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private ClassLoader sysClassLoader;
/**
--- a/src/java.base/share/classes/sun/security/provider/certpath/X509CertPath.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/provider/certpath/X509CertPath.java Fri Oct 11 12:08:01 2019 +0530
@@ -69,6 +69,7 @@
/**
* List of certificates in this chain
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private List<X509Certificate> certs;
/**
--- a/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -70,6 +70,7 @@
// Optional parameters associated with this RSA key
// specified in the encoding of its AlgorithmId.
// Must be null for "RSA" keys.
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private AlgorithmParameterSpec keyParams;
/**
--- a/src/java.base/share/classes/sun/security/rsa/RSAPrivateKeyImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/rsa/RSAPrivateKeyImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -61,6 +61,7 @@
// optional parameters associated with this RSA key
// specified in the encoding of its AlgorithmId.
// must be null for "RSA" keys.
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final AlgorithmParameterSpec keyParams;
/**
--- a/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -62,6 +62,7 @@
// optional parameters associated with this RSA key
// specified in the encoding of its AlgorithmId
// must be null for "RSA" keys.
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private AlgorithmParameterSpec keyParams;
/**
--- a/src/java.base/share/classes/sun/security/util/ObjectIdentifier.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/util/ObjectIdentifier.java Fri Oct 11 12:08:01 2019 +0530
@@ -98,6 +98,7 @@
* Changed to Object
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object components = null; // path from root
/**
* @serial
--- a/src/java.base/share/classes/sun/security/validator/ValidatorException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/validator/ValidatorException.java Fri Oct 11 12:08:01 2019 +0530
@@ -62,6 +62,7 @@
public static final Object T_UNTRUSTED_CERT =
"Untrusted certificate";
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object type;
private X509Certificate cert;
--- a/src/java.base/share/classes/sun/security/x509/AlgorithmId.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/x509/AlgorithmId.java Fri Oct 11 12:08:01 2019 +0530
@@ -72,6 +72,7 @@
private ObjectIdentifier algid;
// The (parsed) parameters
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private AlgorithmParameters algParams;
private boolean constructedFromDer = true;
@@ -80,6 +81,7 @@
* DER-encoded form; subclasses can be made to automatically parse
* them so there is fast access to these parameters.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected DerValue params;
--- a/src/java.base/share/classes/sun/security/x509/X509CertImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/x509/X509CertImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -70,6 +70,7 @@
* @author Hemma Prafullchandra
* @see X509CertInfo
*/
+@SuppressWarnings("serial") // See writeReplace method in Certificate
public class X509CertImpl extends X509Certificate implements DerEncoder {
@java.io.Serial
--- a/src/java.base/share/classes/sun/security/x509/X509Key.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/classes/sun/security/x509/X509Key.java Fri Oct 11 12:08:01 2019 +0530
@@ -84,7 +84,7 @@
private int unusedBits = 0;
/* BitArray form of key */
- private BitArray bitStringKey = null;
+ private transient BitArray bitStringKey = null;
/* The encoding for the key. */
protected byte[] encodedKey;
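
For context: javac's "serial" lint warns when a non-transient field of a Serializable class is declared with a type that is not itself Serializable, even if every value actually stored there is serializable. The @SuppressWarnings("serial") annotations above document that case, while X509Key instead marks its BitArray cache transient so it is simply left out of the serialized form. A minimal sketch of the two idioms, using a hypothetical class that is not part of this patch:

    import java.io.Serializable;
    import java.security.spec.AlgorithmParameterSpec;

    class KeyHolder implements Serializable {
        private static final long serialVersionUID = 1L;

        // Declared type is not Serializable, but every value stored here is
        // expected to be; the annotation records that and silences the
        // serial lint warning.
        @SuppressWarnings("serial")
        private AlgorithmParameterSpec keyParams;

        // A value that can be recomputed is excluded from the serialized
        // form instead of being annotated.
        private transient byte[] cachedEncoding;
    }
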
--- a/src/java.base/share/native/libjava/Class.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/Class.c Fri Oct 11 12:08:01 2019 +0530
@@ -35,12 +35,9 @@
#include "jni.h"
#include "jni_util.h"
#include "jvm.h"
+#include "check_classname.h"
#include "java_lang_Class.h"
-/* defined in libverify.so/verify.dll (src file common/check_format.c) */
-extern jboolean VerifyClassname(char *utf_name, jboolean arrayAllowed);
-extern jboolean VerifyFixClassname(char *utf_name);
-
#define OBJ "Ljava/lang/Object;"
#define CLS "Ljava/lang/Class;"
#define CPL "Ljdk/internal/reflect/ConstantPool;"
@@ -123,14 +120,14 @@
}
(*env)->GetStringUTFRegion(env, classname, 0, unicode_len, clname);
- if (VerifyFixClassname(clname) == JNI_TRUE) {
+ if (verifyFixClassname(clname) == JNI_TRUE) {
/* slashes present in clname, use name b4 translation for exception */
(*env)->GetStringUTFRegion(env, classname, 0, unicode_len, clname);
JNU_ThrowClassNotFoundException(env, clname);
goto done;
}
- if (!VerifyClassname(clname, JNI_TRUE)) { /* expects slashed name */
+ if (!verifyClassname(clname, JNI_TRUE)) { /* expects slashed name */
JNU_ThrowClassNotFoundException(env, clname);
goto done;
}
--- a/src/java.base/share/native/libjava/ClassLoader.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/ClassLoader.c Fri Oct 11 12:08:01 2019 +0530
@@ -30,14 +30,11 @@
#include "jni_util.h"
#include "jlong.h"
#include "jvm.h"
+#include "check_classname.h"
#include "java_lang_ClassLoader.h"
#include "java_lang_ClassLoader_NativeLibrary.h"
#include <string.h>
-/* defined in libverify.so/verify.dll (src file common/check_format.c) */
-extern jboolean VerifyClassname(char *utf_name, jboolean arrayAllowed);
-extern jboolean VerifyFixClassname(char *utf_name);
-
static JNINativeMethod methods[] = {
{"retrieveDirectives", "()Ljava/lang/AssertionStatusDirectives;", (void *)&JVM_AssertionStatusDirectives}
};
@@ -120,7 +117,7 @@
if (utfName == NULL) {
goto free_body;
}
- VerifyFixClassname(utfName);
+ fixClassname(utfName);
} else {
utfName = NULL;
}
@@ -185,7 +182,7 @@
JNU_ThrowOutOfMemoryError(env, NULL);
return result;
}
- VerifyFixClassname(utfName);
+ fixClassname(utfName);
} else {
utfName = NULL;
}
@@ -231,9 +228,9 @@
JNU_ThrowOutOfMemoryError(env, NULL);
return NULL;
}
- VerifyFixClassname(clname);
+ fixClassname(clname);
- if (!VerifyClassname(clname, JNI_TRUE)) { /* expects slashed name */
+ if (!verifyClassname(clname, JNI_TRUE)) { /* expects slashed name */
goto done;
}
--- a/src/java.base/share/native/libjava/VM.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/VM.c Fri Oct 11 12:08:01 2019 +0530
@@ -42,11 +42,6 @@
JNIEXPORT void JNICALL
Java_jdk_internal_misc_VM_initialize(JNIEnv *env, jclass cls) {
- if (!JDK_InitJvmHandle()) {
- JNU_ThrowInternalError(env, "Handle for JVM not found for symbol lookup");
- return;
- }
-
// Registers implementations of native methods described in methods[]
// above.
// In particular, registers JVM_GetNanoTimeAdjustment as the implementation
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/native/libjava/check_classname.c Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <setjmp.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "jni.h"
+#include "jvm.h"
+#include "check_classname.h"
+
+typedef unsigned short unicode;
+
+static char *
+skip_over_fieldname(char *name, jboolean slash_okay,
+ unsigned int len);
+static char *
+skip_over_field_signature(char *name, jboolean void_okay,
+ unsigned int len);
+
+/*
+ * Return non-zero if the character is valid in a JVM class name, zero
+ * otherwise. The only characters currently disallowed from JVM class
+ * names are given in the table below:
+ *
+ * Character Hex Decimal
+ * '.' 0x2e 46
+ * '/' 0x2f 47
+ * ';' 0x3b 59
+ * '[' 0x5b 91
+ *
+ * (Method names have further restrictions dealing with the '<' and
+ * '>' characters.)
+ */
+static int isJvmIdentifier(unicode ch) {
+ if( ch > 91 || ch < 46 )
+ return 1; /* Lowercase ASCII letters are > 91 */
+ else { /* 46 <= ch <= 91 */
+ if (ch <= 90 && ch >= 60) {
+ return 1; /* Uppercase ASCII recognized here */
+ } else { /* ch == 91 || 46 <= ch <= 59 */
+ if (ch == 91 || ch == 59 || ch <= 47)
+ return 0;
+ else
+ return 1;
+ }
+ }
+}
+
+static unicode
+next_utf2unicode(char **utfstring_ptr, int * valid)
+{
+ unsigned char *ptr = (unsigned char *)(*utfstring_ptr);
+ unsigned char ch, ch2, ch3;
+ int length = 1; /* default length */
+ unicode result = 0x80; /* default bad result; */
+ *valid = 1;
+ switch ((ch = ptr[0]) >> 4) {
+ default:
+ result = ch;
+ break;
+
+ case 0x8: case 0x9: case 0xA: case 0xB: case 0xF:
+ /* Shouldn't happen. */
+ *valid = 0;
+ break;
+
+ case 0xC: case 0xD:
+ /* 110xxxxx 10xxxxxx */
+ if (((ch2 = ptr[1]) & 0xC0) == 0x80) {
+ unsigned char high_five = ch & 0x1F;
+ unsigned char low_six = ch2 & 0x3F;
+ result = (high_five << 6) + low_six;
+ length = 2;
+ }
+ break;
+
+ case 0xE:
+ /* 1110xxxx 10xxxxxx 10xxxxxx */
+ if (((ch2 = ptr[1]) & 0xC0) == 0x80) {
+ if (((ch3 = ptr[2]) & 0xC0) == 0x80) {
+ unsigned char high_four = ch & 0x0f;
+ unsigned char mid_six = ch2 & 0x3f;
+ unsigned char low_six = ch3 & 0x3f;
+ result = (((high_four << 6) + mid_six) << 6) + low_six;
+ length = 3;
+ } else {
+ length = 2;
+ }
+ }
+ break;
+ } /* end of switch */
+
+ *utfstring_ptr = (char *)(ptr + length);
+ return result;
+}
+
+/* Take pointer to a string. Skip over the longest part of the string that
+ * could be taken as a fieldname. Allow '/' if slash_okay is JNI_TRUE.
+ *
+ * Return a pointer to just past the fieldname. Return NULL if no fieldname
+ * at all was found, or in the case of slash_okay being true, we saw
+ * consecutive slashes (meaning we were looking for a qualified path but
+ * found something that was badly-formed).
+ */
+static char *
+skip_over_fieldname(char *name, jboolean slash_okay,
+ unsigned int length)
+{
+ char *p;
+ unicode ch;
+ unicode last_ch = 0;
+ int valid = 1;
+ /* last_ch == 0 implies we are looking at the first char. */
+ for (p = name; p != name + length; last_ch = ch) {
+ char *old_p = p;
+ ch = *p;
+ if (ch < 128) {
+ p++;
+ if (isJvmIdentifier(ch)) {
+ continue;
+ }
+ } else {
+ char *tmp_p = p;
+ ch = next_utf2unicode(&tmp_p, &valid);
+ if (valid == 0)
+ return 0;
+ p = tmp_p;
+ if (isJvmIdentifier(ch)) {
+ continue;
+ }
+ }
+
+ if (slash_okay && ch == '/' && last_ch) {
+ if (last_ch == '/') {
+ return 0; /* Don't permit consecutive slashes */
+ }
+ } else if (ch == '_' || ch == '$') {
+ } else {
+ return last_ch ? old_p : 0;
+ }
+ }
+ return last_ch ? p : 0;
+}
+
+/* Take pointer to a string. Skip over the longest part of the string that
+ * could be taken as a field signature. Allow "void" if void_okay.
+ *
+ * Return a pointer to just past the signature. Return NULL if no legal
+ * signature is found.
+ */
+
+static char *
+skip_over_field_signature(char *name, jboolean void_okay,
+ unsigned int length)
+{
+ unsigned int array_dim = 0;
+ for (;length > 0;) {
+ switch (name[0]) {
+ case JVM_SIGNATURE_VOID:
+ if (!void_okay) return 0;
+ /* FALL THROUGH */
+ case JVM_SIGNATURE_BOOLEAN:
+ case JVM_SIGNATURE_BYTE:
+ case JVM_SIGNATURE_CHAR:
+ case JVM_SIGNATURE_SHORT:
+ case JVM_SIGNATURE_INT:
+ case JVM_SIGNATURE_FLOAT:
+ case JVM_SIGNATURE_LONG:
+ case JVM_SIGNATURE_DOUBLE:
+ return name + 1;
+
+ case JVM_SIGNATURE_CLASS: {
+ /* Skip over the classname, if one is there. */
+ char *p =
+ skip_over_fieldname(name + 1, JNI_TRUE, --length);
+ /* The next character better be a semicolon. */
+ if (p && p - name - 1 > 0 && p[0] == ';')
+ return p + 1;
+ return 0;
+ }
+
+ case JVM_SIGNATURE_ARRAY:
+ array_dim++;
+ /* JVMS 2nd ed. 4.10 */
+ /* The number of dimensions in an array is limited to 255 ... */
+ if (array_dim > 255) {
+ return 0;
+ }
+ /* The rest of what's there better be a legal signature. */
+ name++;
+ length--;
+ void_okay = JNI_FALSE;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* Determine if the specified name is a legal
+ * UTF name for a classname.
+ *
+ * Note that this routine expects the internal form of qualified classes:
+ * the dots should have been replaced by slashes.
+ */
+jboolean verifyClassname(char *name, jboolean allowArrayClass)
+{
+ size_t s = strlen(name);
+ assert(s <= UINT_MAX);
+ unsigned int length = (unsigned int)s;
+ char *p;
+
+ if (length > 0 && name[0] == JVM_SIGNATURE_ARRAY) {
+ if (!allowArrayClass) {
+ return JNI_FALSE;
+ } else {
+ /* Everything that's left better be a field signature */
+ p = skip_over_field_signature(name, JNI_FALSE, length);
+ }
+ } else {
+ /* skip over the fieldname. Slashes are okay */
+ p = skip_over_fieldname(name, JNI_TRUE, length);
+ }
+ return (p != 0 && p - name == (ptrdiff_t)length);
+}
+
+/*
+ * Translates '.' to '/'. Returns JNI_TRUE if any / were present.
+ */
+jboolean verifyFixClassname(char *name)
+{
+ char *p = name;
+ jboolean slashesFound = JNI_FALSE;
+ int valid = 1;
+
+ while (valid != 0 && *p != '\0') {
+ if (*p == '/') {
+ slashesFound = JNI_TRUE;
+ p++;
+ } else if (*p == '.') {
+ *p++ = '/';
+ } else {
+ next_utf2unicode(&p, &valid);
+ }
+ }
+
+ return slashesFound && valid != 0;
+}
+
+/*
+ * Translates '.' to '/'.
+ */
+void fixClassname(char *name)
+{
+ char *p = name;
+ int valid = 1;
+
+ while (valid != 0 && *p != '\0') {
+ if (*p == '.') {
+ *p++ = '/';
+ } else {
+ next_utf2unicode(&p, &valid);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/native/libjava/check_classname.h Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jni.h"
+
+/*
+ * Class name checking methods
+ */
+
+jboolean verifyClassname(char *name, jboolean allowArrayClass);
+jboolean verifyFixClassname(char *name);
+void fixClassname(char *name);
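
The checks above back Class.forName and ClassLoader.defineClass: fixClassname translates dots to slashes, verifyFixClassname additionally reports names that already contain slashes (which forName rejects), and verifyClassname validates the slashed internal form. The observable behaviour can be sketched from plain Java; this demo is illustrative and not part of the patch:

    public class ClassNameCheckDemo {
        public static void main(String[] args) throws Exception {
            // A dotted binary name is translated to the internal slashed
            // form (java/lang/String) before lookup.
            System.out.println(Class.forName("java.lang.String"));

            // A name that already uses the internal slashed form is rejected
            // by the native check in Class.c and surfaces as
            // ClassNotFoundException.
            try {
                Class.forName("java/lang/String");
            } catch (ClassNotFoundException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }
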
--- a/src/java.base/share/native/libjava/jdk_util.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/jdk_util.h Fri Oct 11 12:08:01 2019 +0530
@@ -45,20 +45,6 @@
JNIEXPORT void
JDK_GetVersionInfo0(jdk_version_info* info, size_t info_size);
-
-/*-------------------------------------------------------
- * Internal interface for JDK to use
- *-------------------------------------------------------
- */
-
-/* Init JVM handle for symbol lookup;
- * Return 0 if JVM handle not found.
- */
-int JDK_InitJvmHandle();
-
-/* Find the named JVM entry; returns NULL if not found. */
-void* JDK_FindJvmEntry(const char* name);
-
#ifdef __cplusplus
} /* extern "C" */
#endif /* __cplusplus */
--- a/src/java.base/share/native/libjava/jni_util.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/jni_util.c Fri Oct 11 12:08:01 2019 +0530
@@ -77,77 +77,23 @@
}
JNIEXPORT void JNICALL
-JNU_ThrowIllegalAccessError(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/IllegalAccessError", msg);
-}
-
-JNIEXPORT void JNICALL
-JNU_ThrowIllegalAccessException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/IllegalAccessException", msg);
-}
-
-JNIEXPORT void JNICALL
JNU_ThrowInternalError(JNIEnv *env, const char *msg)
{
JNU_ThrowByName(env, "java/lang/InternalError", msg);
}
JNIEXPORT void JNICALL
-JNU_ThrowNoSuchFieldException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/NoSuchFieldException", msg);
-}
-
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchMethodException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/NoSuchMethodException", msg);
-}
-
-JNIEXPORT void JNICALL
JNU_ThrowClassNotFoundException(JNIEnv *env, const char *msg)
{
JNU_ThrowByName(env, "java/lang/ClassNotFoundException", msg);
}
JNIEXPORT void JNICALL
-JNU_ThrowNumberFormatException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/NumberFormatException", msg);
-}
-
-JNIEXPORT void JNICALL
JNU_ThrowIOException(JNIEnv *env, const char *msg)
{
JNU_ThrowByName(env, "java/io/IOException", msg);
}
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchFieldError(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/NoSuchFieldError", msg);
-}
-
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchMethodError(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/NoSuchMethodError", msg);
-}
-
-JNIEXPORT void JNICALL
-JNU_ThrowStringIndexOutOfBoundsException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/StringIndexOutOfBoundsException", msg);
-}
-
-JNIEXPORT void JNICALL
-JNU_ThrowInstantiationException(JNIEnv *env, const char *msg)
-{
- JNU_ThrowByName(env, "java/lang/InstantiationException", msg);
-}
-
/*
* Throw an exception by name, using the string returned by
* getLastErrorString for the detail string. If the last-error
@@ -845,12 +791,6 @@
CHECK_NULL(String_value_ID);
}
-JNIEXPORT jstring
-NewStringPlatform(JNIEnv *env, const char *str)
-{
- return JNU_NewStringPlatform(env, str);
-}
-
JNIEXPORT jstring JNICALL
JNU_NewStringPlatform(JNIEnv *env, const char *str)
{
@@ -1024,54 +964,6 @@
return cls;
}
-JNIEXPORT jclass JNICALL
-JNU_ClassClass(JNIEnv *env)
-{
- static jclass cls = 0;
- if (cls == 0) {
- jclass c;
- if ((*env)->EnsureLocalCapacity(env, 1) < 0)
- return 0;
- c = (*env)->FindClass(env, "java/lang/Class");
- CHECK_NULL_RETURN(c, NULL);
- cls = (*env)->NewGlobalRef(env, c);
- (*env)->DeleteLocalRef(env, c);
- }
- return cls;
-}
-
-JNIEXPORT jclass JNICALL
-JNU_ClassObject(JNIEnv *env)
-{
- static jclass cls = 0;
- if (cls == 0) {
- jclass c;
- if ((*env)->EnsureLocalCapacity(env, 1) < 0)
- return 0;
- c = (*env)->FindClass(env, "java/lang/Object");
- CHECK_NULL_RETURN(c, NULL);
- cls = (*env)->NewGlobalRef(env, c);
- (*env)->DeleteLocalRef(env, c);
- }
- return cls;
-}
-
-JNIEXPORT jclass JNICALL
-JNU_ClassThrowable(JNIEnv *env)
-{
- static jclass cls = 0;
- if (cls == 0) {
- jclass c;
- if ((*env)->EnsureLocalCapacity(env, 1) < 0)
- return 0;
- c = (*env)->FindClass(env, "java/lang/Throwable");
- CHECK_NULL_RETURN(c, NULL);
- cls = (*env)->NewGlobalRef(env, c);
- (*env)->DeleteLocalRef(env, c);
- }
- return cls;
-}
-
JNIEXPORT jint JNICALL
JNU_CopyObjectArray(JNIEnv *env, jobjectArray dst, jobjectArray src,
jint count)
@@ -1110,125 +1002,10 @@
return JNI_ERR;
}
-JNIEXPORT jboolean JNICALL
-JNU_Equals(JNIEnv *env, jobject object1, jobject object2)
-{
- static jmethodID mid = NULL;
- if (mid == NULL) {
- jclass objClazz = JNU_ClassObject(env);
- CHECK_NULL_RETURN(objClazz, JNI_FALSE);
- mid = (*env)->GetMethodID(env, objClazz, "equals",
- "(Ljava/lang/Object;)Z");
- CHECK_NULL_RETURN(mid, JNI_FALSE);
- }
- return (*env)->CallBooleanMethod(env, object1, mid, object2);
-}
-
-
-/************************************************************************
- * Thread calls
- */
-
-static jmethodID Object_waitMID;
-static jmethodID Object_notifyMID;
-static jmethodID Object_notifyAllMID;
-
-JNIEXPORT void JNICALL
-JNU_MonitorWait(JNIEnv *env, jobject object, jlong timeout)
-{
- if (object == NULL) {
- JNU_ThrowNullPointerException(env, "JNU_MonitorWait argument");
- return;
- }
- if (Object_waitMID == NULL) {
- jclass cls = JNU_ClassObject(env);
- if (cls == NULL) {
- return;
- }
- Object_waitMID = (*env)->GetMethodID(env, cls, "wait", "(J)V");
- if (Object_waitMID == NULL) {
- return;
- }
- }
- (*env)->CallVoidMethod(env, object, Object_waitMID, timeout);
-}
-
-JNIEXPORT void JNICALL
-JNU_Notify(JNIEnv *env, jobject object)
-{
- if (object == NULL) {
- JNU_ThrowNullPointerException(env, "JNU_Notify argument");
- return;
- }
- if (Object_notifyMID == NULL) {
- jclass cls = JNU_ClassObject(env);
- if (cls == NULL) {
- return;
- }
- Object_notifyMID = (*env)->GetMethodID(env, cls, "notify", "()V");
- if (Object_notifyMID == NULL) {
- return;
- }
- }
- (*env)->CallVoidMethod(env, object, Object_notifyMID);
-}
-
-JNIEXPORT void JNICALL
-JNU_NotifyAll(JNIEnv *env, jobject object)
-{
- if (object == NULL) {
- JNU_ThrowNullPointerException(env, "JNU_NotifyAll argument");
- return;
- }
- if (Object_notifyAllMID == NULL) {
- jclass cls = JNU_ClassObject(env);
- if (cls == NULL) {
- return;
- }
- Object_notifyAllMID = (*env)->GetMethodID(env, cls,"notifyAll", "()V");
- if (Object_notifyAllMID == NULL) {
- return;
- }
- }
- (*env)->CallVoidMethod(env, object, Object_notifyAllMID);
-}
-
-
/************************************************************************
* Debugging utilities
*/
-JNIEXPORT void JNICALL
-JNU_PrintString(JNIEnv *env, char *hdr, jstring string)
-{
- if (string == NULL) {
- fprintf(stderr, "%s: is NULL\n", hdr);
- } else {
- const char *stringPtr = JNU_GetStringPlatformChars(env, string, 0);
- if (stringPtr == 0)
- return;
- fprintf(stderr, "%s: %s\n", hdr, stringPtr);
- JNU_ReleaseStringPlatformChars(env, string, stringPtr);
- }
-}
-
-JNIEXPORT void JNICALL
-JNU_PrintClass(JNIEnv *env, char* hdr, jobject object)
-{
- if (object == NULL) {
- fprintf(stderr, "%s: object is NULL\n", hdr);
- return;
- } else {
- jclass cls = (*env)->GetObjectClass(env, object);
- jstring clsName = JNU_ToString(env, cls);
- if (clsName == NULL) {
- JNU_PrintString(env, hdr, clsName);
- }
- (*env)->DeleteLocalRef(env, cls);
- (*env)->DeleteLocalRef(env, clsName);
- }
-}
-
JNIEXPORT jstring JNICALL
JNU_ToString(JNIEnv *env, jobject object)
{
@@ -1437,70 +1214,3 @@
}
return result;
}
-
-JNIEXPORT void JNICALL
-JNU_SetStaticFieldByName(JNIEnv *env,
- jboolean *hasException,
- const char *classname,
- const char *name,
- const char *signature,
- ...)
-{
- jclass cls;
- jfieldID fid;
- va_list args;
-
- if ((*env)->EnsureLocalCapacity(env, 3) < 0)
- goto done2;
-
- cls = (*env)->FindClass(env, classname);
- if (cls == 0)
- goto done2;
-
- fid = (*env)->GetStaticFieldID(env, cls, name, signature);
- if (fid == 0)
- goto done1;
-
- va_start(args, signature);
- switch (*signature) {
- case '[':
- case 'L':
- (*env)->SetStaticObjectField(env, cls, fid, va_arg(args, jobject));
- break;
- case 'Z':
- (*env)->SetStaticBooleanField(env, cls, fid, (jboolean)va_arg(args, int));
- break;
- case 'B':
- (*env)->SetStaticByteField(env, cls, fid, (jbyte)va_arg(args, int));
- break;
- case 'C':
- (*env)->SetStaticCharField(env, cls, fid, (jchar)va_arg(args, int));
- break;
- case 'S':
- (*env)->SetStaticShortField(env, cls, fid, (jshort)va_arg(args, int));
- break;
- case 'I':
- (*env)->SetStaticIntField(env, cls, fid, va_arg(args, jint));
- break;
- case 'J':
- (*env)->SetStaticLongField(env, cls, fid, va_arg(args, jlong));
- break;
- case 'F':
- (*env)->SetStaticFloatField(env, cls, fid, (jfloat)va_arg(args, jdouble));
- break;
- case 'D':
- (*env)->SetStaticDoubleField(env, cls, fid, va_arg(args, jdouble));
- break;
-
- default:
- (*env)->FatalError(env, "JNU_SetStaticFieldByName: illegal signature");
- }
- va_end(args);
-
- done1:
- (*env)->DeleteLocalRef(env, cls);
- done2:
- if (hasException) {
- *hasException = (*env)->ExceptionCheck(env);
- }
-}
--- a/src/java.base/share/native/libjava/jni_util.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libjava/jni_util.h Fri Oct 11 12:08:01 2019 +0530
@@ -62,41 +62,14 @@
JNU_ThrowIllegalArgumentException(JNIEnv *env, const char *msg);
JNIEXPORT void JNICALL
-JNU_ThrowIllegalAccessError(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowIllegalAccessException(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
JNU_ThrowInternalError(JNIEnv *env, const char *msg);
JNIEXPORT void JNICALL
JNU_ThrowIOException(JNIEnv *env, const char *msg);
JNIEXPORT void JNICALL
-JNU_ThrowNoSuchFieldException(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchMethodException(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
JNU_ThrowClassNotFoundException(JNIEnv *env, const char *msg);
-JNIEXPORT void JNICALL
-JNU_ThrowNumberFormatException(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchFieldError(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowNoSuchMethodError(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowStringIndexOutOfBoundsException(JNIEnv *env, const char *msg);
-
-JNIEXPORT void JNICALL
-JNU_ThrowInstantiationException(JNIEnv *env, const char *msg);
-
/* Throw an exception by name, using the string returned by
* getLastErrorString for the detail string. If the last-error
* string is NULL, use the given default detail string.
@@ -120,9 +93,6 @@
JNU_ThrowIOExceptionWithLastError(JNIEnv *env, const char *defaultDetail);
/* Convert between Java strings and i18n C strings */
-JNIEXPORT jstring
-NewStringPlatform(JNIEnv *env, const char *str);
-
JNIEXPORT const char *
GetStringPlatformChars(JNIEnv *env, jstring jstr, jboolean *isCopy);
@@ -139,15 +109,6 @@
JNIEXPORT jclass JNICALL
JNU_ClassString(JNIEnv *env);
-JNIEXPORT jclass JNICALL
-JNU_ClassClass(JNIEnv *env);
-
-JNIEXPORT jclass JNICALL
-JNU_ClassObject(JNIEnv *env);
-
-JNIEXPORT jclass JNICALL
-JNU_ClassThrowable(JNIEnv *env);
-
/* Copy count number of arguments from src to dst. Array bounds
* and ArrayStoreException are checked.
*/
@@ -246,36 +207,6 @@
const char *classname,
const char *name,
const char *sig);
-JNIEXPORT void JNICALL
-JNU_SetStaticFieldByName(JNIEnv *env,
- jboolean *hasException,
- const char *classname,
- const char *name,
- const char *sig,
- ...);
-
-
-/*
- * Calls the .equals method.
- */
-JNIEXPORT jboolean JNICALL
-JNU_Equals(JNIEnv *env, jobject object1, jobject object2);
-
-
-/************************************************************************
- * Thread calls
- *
- * Convenience thread-related calls on the java.lang.Object class.
- */
-
-JNIEXPORT void JNICALL
-JNU_MonitorWait(JNIEnv *env, jobject object, jlong timeout);
-
-JNIEXPORT void JNICALL
-JNU_Notify(JNIEnv *env, jobject object);
-
-JNIEXPORT void JNICALL
-JNU_NotifyAll(JNIEnv *env, jobject object);
/************************************************************************
@@ -349,19 +280,15 @@
} \
} while (0)
#endif /* __cplusplus */
+
/************************************************************************
* Debugging utilities
*/
-JNIEXPORT void JNICALL
-JNU_PrintString(JNIEnv *env, char *hdr, jstring string);
-
-JNIEXPORT void JNICALL
-JNU_PrintClass(JNIEnv *env, char *hdr, jobject object);
-
JNIEXPORT jstring JNICALL
JNU_ToString(JNIEnv *env, jobject object);
+
/*
* Package shorthand for use by native libraries
*/
@@ -402,8 +329,6 @@
FAST_UTF_8
};
-int getFastEncoding();
-
JNIEXPORT void InitializeEncoding(JNIEnv *env, const char *name);
void* getProcessHandle();
--- a/src/java.base/share/native/libjava/verify_stub.c Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 1999, 2003, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-
-/*
- * The real verifier now lives in libverifier.so/verifier.dll.
- *
- * This dummy exists so that HotSpot will run with the new
- * libjava.so/java.dll which is where is it accustomed to finding the
- * verifier.
- */
-
-#include "jni.h"
-
-struct struct_class_size_info;
-typedef struct struct_class_size_info class_size_info;
-
-
-JNIIMPORT jboolean
-VerifyClass(JNIEnv *env, jclass cb, char *buffer, jint len);
-
-JNIIMPORT jboolean
-VerifyClassForMajorVersion(JNIEnv *env, jclass cb, char *buffer, jint len,
- jint major_version);
-
-JNIEXPORT jboolean
-VerifyClassCodes(JNIEnv *env, jclass cb, char *buffer, jint len)
-{
- return VerifyClass(env, cb, buffer, len);
-}
-
-JNIEXPORT jboolean
-VerifyClassCodesForMajorVersion(JNIEnv *env, jclass cb, char *buffer,
- jint len, jint major_version)
-{
- return VerifyClassForMajorVersion(env, cb, buffer, len, major_version);
-}
--- a/src/java.base/share/native/libverify/check_code.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libverify/check_code.c Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1994, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,6 @@
Exported function:
jboolean
- VerifyClass(JNIEnv *env, jclass cb, char *message_buffer,
- jint buffer_length)
- jboolean
VerifyClassForMajorVersion(JNIEnv *env, jclass cb, char *message_buffer,
jint buffer_length, jint major_version)
@@ -910,20 +907,6 @@
return result;
}
-#define OLD_FORMAT_MAX_MAJOR_VERSION 48
-
-JNIEXPORT jboolean
-VerifyClass(JNIEnv *env, jclass cb, char *buffer, jint len)
-{
- static int warned = 0;
- if (!warned) {
- jio_fprintf(stdout, "Warning! An old version of jvm is used. This is not supported.\n");
- warned = 1;
- }
- return VerifyClassForMajorVersion(env, cb, buffer, len,
- OLD_FORMAT_MAX_MAJOR_VERSION);
-}
-
static void
verify_field(context_type *context, jclass cb, int field_index)
{
--- a/src/java.base/share/native/libverify/check_format.c Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include <assert.h>
-#include <limits.h>
-#include <setjmp.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "jni.h"
-#include "jvm.h"
-
-typedef unsigned short unicode;
-
-static char *
-skip_over_fieldname(char *name, jboolean slash_okay,
- unsigned int len);
-static char *
-skip_over_field_signature(char *name, jboolean void_okay,
- unsigned int len);
-
-/*
- * Return non-zero if the character is a valid in JVM class name, zero
- * otherwise. The only characters currently disallowed from JVM class
- * names are given in the table below:
- *
- * Character Hex Decimal
- * '.' 0x2e 46
- * '/' 0x2f 47
- * ';' 0x3b 59
- * '[' 0x5b 91
- *
- * (Method names have further restrictions dealing with the '<' and
- * '>' characters.)
- */
-static int isJvmIdentifier(unicode ch) {
- if( ch > 91 || ch < 46 )
- return 1; /* Lowercase ASCII letters are > 91 */
- else { /* 46 <= ch <= 91 */
- if (ch <= 90 && ch >= 60) {
- return 1; /* Uppercase ASCII recognized here */
- } else { /* ch == 91 || 46 <= ch <= 59 */
- if (ch == 91 || ch == 59 || ch <= 47)
- return 0;
- else
- return 1;
- }
- }
-}
-
-static unicode
-next_utf2unicode(char **utfstring_ptr, int * valid)
-{
- unsigned char *ptr = (unsigned char *)(*utfstring_ptr);
- unsigned char ch, ch2, ch3;
- int length = 1; /* default length */
- unicode result = 0x80; /* default bad result; */
- *valid = 1;
- switch ((ch = ptr[0]) >> 4) {
- default:
- result = ch;
- break;
-
- case 0x8: case 0x9: case 0xA: case 0xB: case 0xF:
- /* Shouldn't happen. */
- *valid = 0;
- break;
-
- case 0xC: case 0xD:
- /* 110xxxxx 10xxxxxx */
- if (((ch2 = ptr[1]) & 0xC0) == 0x80) {
- unsigned char high_five = ch & 0x1F;
- unsigned char low_six = ch2 & 0x3F;
- result = (high_five << 6) + low_six;
- length = 2;
- }
- break;
-
- case 0xE:
- /* 1110xxxx 10xxxxxx 10xxxxxx */
- if (((ch2 = ptr[1]) & 0xC0) == 0x80) {
- if (((ch3 = ptr[2]) & 0xC0) == 0x80) {
- unsigned char high_four = ch & 0x0f;
- unsigned char mid_six = ch2 & 0x3f;
- unsigned char low_six = ch3 & 0x3f;
- result = (((high_four << 6) + mid_six) << 6) + low_six;
- length = 3;
- } else {
- length = 2;
- }
- }
- break;
- } /* end of switch */
-
- *utfstring_ptr = (char *)(ptr + length);
- return result;
-}
-
-/* Take pointer to a string. Skip over the longest part of the string that
- * could be taken as a fieldname. Allow '/' if slash_okay is JNI_TRUE.
- *
- * Return a pointer to just past the fieldname. Return NULL if no fieldname
- * at all was found, or in the case of slash_okay being true, we saw
- * consecutive slashes (meaning we were looking for a qualified path but
- * found something that was badly-formed).
- */
-static char *
-skip_over_fieldname(char *name, jboolean slash_okay,
- unsigned int length)
-{
- char *p;
- unicode ch;
- unicode last_ch = 0;
- int valid = 1;
- /* last_ch == 0 implies we are looking at the first char. */
- for (p = name; p != name + length; last_ch = ch) {
- char *old_p = p;
- ch = *p;
- if (ch < 128) {
- p++;
- if (isJvmIdentifier(ch)) {
- continue;
- }
- } else {
- char *tmp_p = p;
- ch = next_utf2unicode(&tmp_p, &valid);
- if (valid == 0)
- return 0;
- p = tmp_p;
- if (isJvmIdentifier(ch)) {
- continue;
- }
- }
-
- if (slash_okay && ch == '/' && last_ch) {
- if (last_ch == '/') {
- return 0; /* Don't permit consecutive slashes */
- }
- } else if (ch == '_' || ch == '$') {
- } else {
- return last_ch ? old_p : 0;
- }
- }
- return last_ch ? p : 0;
-}
-
-/* Take pointer to a string. Skip over the longest part of the string that
- * could be taken as a field signature. Allow "void" if void_okay.
- *
- * Return a pointer to just past the signature. Return NULL if no legal
- * signature is found.
- */
-
-static char *
-skip_over_field_signature(char *name, jboolean void_okay,
- unsigned int length)
-{
- unsigned int array_dim = 0;
- for (;length > 0;) {
- switch (name[0]) {
- case JVM_SIGNATURE_VOID:
- if (!void_okay) return 0;
- /* FALL THROUGH */
- case JVM_SIGNATURE_BOOLEAN:
- case JVM_SIGNATURE_BYTE:
- case JVM_SIGNATURE_CHAR:
- case JVM_SIGNATURE_SHORT:
- case JVM_SIGNATURE_INT:
- case JVM_SIGNATURE_FLOAT:
- case JVM_SIGNATURE_LONG:
- case JVM_SIGNATURE_DOUBLE:
- return name + 1;
-
- case JVM_SIGNATURE_CLASS: {
- /* Skip over the classname, if one is there. */
- char *p =
- skip_over_fieldname(name + 1, JNI_TRUE, --length);
- /* The next character better be a semicolon. */
- if (p && p - name - 1 > 0 && p[0] == ';')
- return p + 1;
- return 0;
- }
-
- case JVM_SIGNATURE_ARRAY:
- array_dim++;
- /* JVMS 2nd ed. 4.10 */
- /* The number of dimensions in an array is limited to 255 ... */
- if (array_dim > 255) {
- return 0;
- }
- /* The rest of what's there better be a legal signature. */
- name++;
- length--;
- void_okay = JNI_FALSE;
- break;
-
- default:
- return 0;
- }
- }
- return 0;
-}
-
-
-/* Used in java/lang/Class.c */
-/* Determine if the specified name is legal
- * UTF name for a classname.
- *
- * Note that this routine expects the internal form of qualified classes:
- * the dots should have been replaced by slashes.
- */
-JNIEXPORT jboolean
-VerifyClassname(char *name, jboolean allowArrayClass)
-{
- size_t s = strlen(name);
- assert(s <= UINT_MAX);
- unsigned int length = (unsigned int)s;
- char *p;
-
- if (length > 0 && name[0] == JVM_SIGNATURE_ARRAY) {
- if (!allowArrayClass) {
- return JNI_FALSE;
- } else {
- /* Everything that's left better be a field signature */
- p = skip_over_field_signature(name, JNI_FALSE, length);
- }
- } else {
- /* skip over the fieldname. Slashes are okay */
- p = skip_over_fieldname(name, JNI_TRUE, length);
- }
- return (p != 0 && p - name == (ptrdiff_t)length);
-}
-
-/*
- * Translates '.' to '/'. Returns JNI_TRUE is any / were present.
- */
-JNIEXPORT jboolean
-VerifyFixClassname(char *name)
-{
- char *p = name;
- jboolean slashesFound = JNI_FALSE;
- int valid = 1;
-
- while (valid != 0 && *p != '\0') {
- if (*p == '/') {
- slashesFound = JNI_TRUE;
- p++;
- } else if (*p == '.') {
- *p++ = '/';
- } else {
- next_utf2unicode(&p, &valid);
- }
- }
-
- return slashesFound && valid != 0;
-}
--- a/src/java.base/share/native/libzip/Deflater.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/share/native/libzip/Deflater.c Fri Oct 11 12:08:01 2019 +0530
@@ -257,7 +257,7 @@
res = doDeflate(env, addr, input, inputLen, output + outputOff, outputLen,
flush, params);
- (*env)->ReleasePrimitiveArrayCritical(env, outputArray, input, 0);
+ (*env)->ReleasePrimitiveArrayCritical(env, outputArray, output, 0);
retVal = checkDeflateStatus(env, addr, inputLen, outputLen, params, res);
return retVal;
--- a/src/java.base/unix/classes/sun/nio/fs/UnixNativeDispatcher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/unix/classes/sun/nio/fs/UnixNativeDispatcher.java Fri Oct 11 12:08:01 2019 +0530
@@ -119,6 +119,16 @@
static native void fclose(long stream) throws UnixException;
/**
+ * void rewind(FILE* stream);
+ */
+ static native void rewind(long stream) throws UnixException;
+
+ /**
+ * ssize_t getline(char **lineptr, size_t *n, FILE *stream);
+ */
+ static native int getlinelen(long stream) throws UnixException;
+
+ /**
* link(const char* existing, const char* new)
*/
static void link(UnixPath existing, UnixPath newfile) throws UnixException {
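
The two declarations above only add native entry points; their Java call sites are not part of this hunk. A hypothetical helper, assuming it lives in sun.nio.fs next to UnixNativeDispatcher (the method name and the loop are illustrative only, not from the patch):

    // Sketch: measure the longest line of an already-open FILE* handle,
    // then rewind the stream so it can be read again.
    static int longestLineLength(long stream) throws UnixException {
        int max = 0;
        int len;
        // getlinelen returns the next line's length without the NUL
        // terminator, or -1 once EOF is reached.
        while ((len = UnixNativeDispatcher.getlinelen(stream)) != -1) {
            max = Math.max(max, len);
        }
        UnixNativeDispatcher.rewind(stream);
        return max;
    }
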
--- a/src/java.base/unix/native/libjava/jdk_util_md.c Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include <dlfcn.h>
-#include "jdk_util.h"
-
-int JDK_InitJvmHandle() {
- /* nop */
- return 1;
-}
-
-void* JDK_FindJvmEntry(const char* name) {
- return dlsym(RTLD_DEFAULT, name);
-}
--- a/src/java.base/unix/native/libnio/ch/Net.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/unix/native/libnio/ch/Net.c Fri Oct 11 12:08:01 2019 +0530
@@ -66,34 +66,6 @@
#endif
#endif
-#if defined(_AIX)
- #ifndef IP_BLOCK_SOURCE
- #define IP_BLOCK_SOURCE 58 /* Block data from a given source to a given group */
- #define IP_UNBLOCK_SOURCE 59 /* Unblock data from a given source to a given group */
- #define IP_ADD_SOURCE_MEMBERSHIP 60 /* Join a source-specific group */
- #define IP_DROP_SOURCE_MEMBERSHIP 61 /* Leave a source-specific group */
- #endif
-
- #ifndef MCAST_BLOCK_SOURCE
- #define MCAST_BLOCK_SOURCE 64
- #define MCAST_UNBLOCK_SOURCE 65
- #define MCAST_JOIN_SOURCE_GROUP 66
- #define MCAST_LEAVE_SOURCE_GROUP 67
-
- /* This means we're on AIX 5.3 and 'group_source_req' and 'ip_mreq_source' aren't defined as well */
- struct group_source_req {
- uint32_t gsr_interface;
- struct sockaddr_storage gsr_group;
- struct sockaddr_storage gsr_source;
- };
- struct ip_mreq_source {
- struct in_addr imr_multiaddr; /* IP multicast address of group */
- struct in_addr imr_sourceaddr; /* IP address of source */
- struct in_addr imr_interface; /* local IP address of interface */
- };
- #endif
-#endif /* _AIX */
-
#define COPY_INET6_ADDRESS(env, source, target) \
(*env)->GetByteArrayRegion(env, source, 0, 16, target)
--- a/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c Fri Oct 11 12:08:01 2019 +0530
@@ -403,6 +403,51 @@
}
}
+JNIEXPORT void JNICALL
+Java_sun_nio_fs_UnixNativeDispatcher_rewind(JNIEnv* env, jclass this, jlong stream)
+{
+ FILE* fp = jlong_to_ptr(stream);
+ int saved_errno;
+
+ errno = 0;
+ rewind(fp);
+ saved_errno = errno;
+ if (ferror(fp)) {
+ throwUnixException(env, saved_errno);
+ }
+}
+
+/**
+ * This function returns line length without NUL terminator or -1 on EOF.
+ */
+JNIEXPORT jint JNICALL
+Java_sun_nio_fs_UnixNativeDispatcher_getlinelen(JNIEnv* env, jclass this, jlong stream)
+{
+ FILE* fp = jlong_to_ptr(stream);
+ size_t lineSize = 0;
+ char * lineBuffer = NULL;
+ int saved_errno;
+
+ ssize_t res = getline(&lineBuffer, &lineSize, fp);
+ saved_errno = errno;
+
+ /* lineBuffer must be freed regardless of the result, per the getline man page */
+ if (lineBuffer != NULL)
+ free(lineBuffer);
+
+ if (feof(fp))
+ return -1;
+
+ /* On successful return, res >= 0; otherwise res is -1 */
+ if (res == -1)
+ throwUnixException(env, saved_errno);
+
+ if (res > INT_MAX)
+ throwUnixException(env, EOVERFLOW);
+
+ return (jint)res;
+}
+
JNIEXPORT jint JNICALL
Java_sun_nio_fs_UnixNativeDispatcher_open0(JNIEnv* env, jclass this,
jlong pathAddress, jint oflags, jint mode)
--- a/src/java.base/windows/classes/sun/nio/fs/WindowsFileAttributes.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/windows/classes/sun/nio/fs/WindowsFileAttributes.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -108,8 +108,9 @@
private static final short OFFSETOF_FIND_DATA_SIZELOW = 32;
private static final short OFFSETOF_FIND_DATA_RESERVED0 = 36;
- // used to adjust values between Windows and java epoch
- private static final long WINDOWS_EPOCH_IN_MICROSECONDS = -11644473600000000L;
+ // used to adjust values between Windows and java epochs
+ private static final long WINDOWS_EPOCH_IN_MICROS = -11644473600000000L;
+ private static final long WINDOWS_EPOCH_IN_100NS = -116444736000000000L;
// indicates if accurate metadata is required (interesting on NTFS only)
private static final boolean ensureAccurateMetadata;
@@ -137,24 +138,23 @@
* since January 1, 1601 to a FileTime.
*/
static FileTime toFileTime(long time) {
- // 100ns -> us
- time /= 10L;
- // adjust to java epoch
- time += WINDOWS_EPOCH_IN_MICROSECONDS;
- return FileTime.from(time, TimeUnit.MICROSECONDS);
+ try {
+ long adjusted = Math.addExact(time, WINDOWS_EPOCH_IN_100NS);
+ long nanos = Math.multiplyExact(adjusted, 100L);
+ return FileTime.from(nanos, TimeUnit.NANOSECONDS);
+ } catch (ArithmeticException e) {
+ long micros = Math.addExact(time/10L, WINDOWS_EPOCH_IN_MICROS);
+ return FileTime.from(micros, TimeUnit.MICROSECONDS);
+ }
}
/**
- * Convert FileTime to 64-bit value representing the number of 100-nanosecond
- * intervals since January 1, 1601.
+ * Convert FileTime to 64-bit value representing the number of
+ * 100-nanosecond intervals since January 1, 1601.
*/
static long toWindowsTime(FileTime time) {
- long value = time.to(TimeUnit.MICROSECONDS);
- // adjust to Windows epoch+= 11644473600000000L;
- value -= WINDOWS_EPOCH_IN_MICROSECONDS;
- // us -> 100ns
- value *= 10L;
- return value;
+ long adjusted = time.to(TimeUnit.NANOSECONDS)/100L;
+ return adjusted - WINDOWS_EPOCH_IN_100NS;
}
/**
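
The rewritten conversion keeps the full 100 ns resolution of a Windows timestamp whenever the adjusted value fits in a long of nanoseconds, and only falls back to the old microsecond path on overflow. A self-contained sketch of the same arithmetic (constants copied from the patch; the demo class itself is illustrative):

    import java.nio.file.attribute.FileTime;
    import java.util.concurrent.TimeUnit;

    public class WindowsTimeDemo {
        // Offsets between the Windows epoch (1601-01-01) and the Java epoch
        // (1970-01-01), as defined in WindowsFileAttributes.
        private static final long WINDOWS_EPOCH_IN_MICROS = -11644473600000000L;
        private static final long WINDOWS_EPOCH_IN_100NS  = -116444736000000000L;

        static FileTime toFileTime(long hundredNanosSince1601) {
            try {
                // Preferred path: keep 100 ns resolution as nanoseconds.
                long adjusted = Math.addExact(hundredNanosSince1601, WINDOWS_EPOCH_IN_100NS);
                return FileTime.from(Math.multiplyExact(adjusted, 100L), TimeUnit.NANOSECONDS);
            } catch (ArithmeticException e) {
                // Overflow: degrade to microseconds, as the old code always did.
                long micros = Math.addExact(hundredNanosSince1601 / 10L, WINDOWS_EPOCH_IN_MICROS);
                return FileTime.from(micros, TimeUnit.MICROSECONDS);
            }
        }

        public static void main(String[] args) {
            // 0 in Windows time is the Windows epoch itself; the nanosecond
            // path overflows here, so the microsecond fallback is taken.
            System.out.println(toFileTime(0L)); // 1601-01-01T00:00:00Z
        }
    }
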
--- a/src/java.base/windows/native/libjava/jdk_util_md.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.base/windows/native/libjava/jdk_util_md.c Fri Oct 11 12:08:01 2019 +0530
@@ -28,17 +28,6 @@
#define JVM_DLL "jvm.dll"
-static HMODULE jvm_handle = NULL;
-
-int JDK_InitJvmHandle() {
- jvm_handle = GetModuleHandle(JVM_DLL);
- return (jvm_handle != NULL);
-}
-
-void* JDK_FindJvmEntry(const char* name) {
- return (void*) GetProcAddress(jvm_handle, name);
-}
-
JNIEXPORT HMODULE JDK_LoadSystemLibrary(const char* name) {
HMODULE handle = NULL;
char path[MAX_PATH];
--- a/src/java.datatransfer/share/classes/java/awt/datatransfer/MimeType.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.datatransfer/share/classes/java/awt/datatransfer/MimeType.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -346,9 +346,9 @@
return newObj;
}
- private String primaryType;
- private String subType;
- private MimeTypeParameterList parameters;
+ private transient String primaryType;
+ private transient String subType;
+ private transient MimeTypeParameterList parameters;
// below here be scary parsing related things
--- a/src/java.desktop/unix/native/libawt_xawt/awt/awt_GraphicsEnv.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.desktop/unix/native/libawt_xawt/awt/awt_GraphicsEnv.c Fri Oct 11 12:08:01 2019 +0530
@@ -166,6 +166,10 @@
int id = -1;
VisualID defaultVisual = XVisualIDFromVisual(DefaultVisual(awt_display, vinfo->screen));
defaultConfig = ZALLOC(_AwtGraphicsConfigData);
+ if (defaultConfig == NULL) {
+ XFree(visualList);
+ return NULL;
+ }
for (i = 0; i < visualsMatched; i++) {
memcpy(&defaultConfig->awt_visInfo, &visualList[i], sizeof(XVisualInfo));
defaultConfig->awt_depth = visualList[i].depth;
@@ -447,8 +451,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVITrue [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVITrue [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVITrue [i],
sizeof (XVisualInfo));
if (xrenderFindVisualFormat != NULL) {
@@ -482,8 +490,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI8p [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI8p [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI8p [i],
sizeof (XVisualInfo));
}
@@ -495,8 +507,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI12p [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI12p [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI12p [i],
sizeof (XVisualInfo));
}
@@ -508,8 +524,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI8s [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI8s [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI8s [i],
sizeof (XVisualInfo));
}
@@ -521,8 +541,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI8gs [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI8gs [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI8gs [i],
sizeof (XVisualInfo));
}
@@ -534,8 +558,12 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI8sg [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI8sg [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI8sg [i],
sizeof (XVisualInfo));
}
@@ -547,12 +575,20 @@
} else {
ind = nConfig++;
}
- graphicsConfigs [ind] = ZALLOC (_AwtGraphicsConfigData);
- graphicsConfigs [ind]->awt_depth = pVI1sg [i].depth;
+ graphicsConfigs[ind] = ZALLOC (_AwtGraphicsConfigData);
+ if (graphicsConfigs[ind] == NULL) {
+ JNU_ThrowOutOfMemoryError(env, "allocation in getAllConfigs failed");
+ goto cleanup;
+ }
+ graphicsConfigs[ind]->awt_depth = pVI1sg [i].depth;
memcpy (&graphicsConfigs [ind]->awt_visInfo, &pVI1sg [i],
sizeof (XVisualInfo));
}
+ screenDataPtr->numConfigs = nConfig;
+ screenDataPtr->configs = graphicsConfigs;
+
+cleanup:
if (n8p != 0)
XFree (pVI8p);
if (n12p != 0)
@@ -566,9 +602,6 @@
if (n1sg != 0)
XFree (pVI1sg);
- screenDataPtr->numConfigs = nConfig;
- screenDataPtr->configs = graphicsConfigs;
-
AWT_UNLOCK ();
}
--- a/src/java.management/share/classes/java/lang/management/ThreadMXBean.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.management/share/classes/java/lang/management/ThreadMXBean.java Fri Oct 11 12:08:01 2019 +0530
@@ -160,7 +160,7 @@
*
* @return an array of {@code long}, each is a thread ID.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*/
@@ -199,7 +199,7 @@
* it does not exist.
*
* @throws IllegalArgumentException if {@code id <= 0}.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*/
@@ -237,7 +237,7 @@
*
* @throws IllegalArgumentException if any element in the input array
* {@code ids} is {@code <= 0}.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*/
@@ -284,7 +284,7 @@
*
* @throws IllegalArgumentException if {@code id <= 0}.
* @throws IllegalArgumentException if {@code maxDepth is negative}.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*
@@ -337,7 +337,7 @@
* @throws IllegalArgumentException if {@code maxDepth is negative}.
* @throws IllegalArgumentException if any element in the input array
* {@code ids} is {@code <= 0}.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*
@@ -360,7 +360,7 @@
* @return {@code true} if thread contention monitoring is enabled;
* {@code false} otherwise.
*
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine does not support thread contention monitoring.
*
* @see #isThreadContentionMonitoringSupported
@@ -374,10 +374,10 @@
* @param enable {@code true} to enable;
* {@code false} to disable.
*
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support thread contention monitoring.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("control").
*
@@ -394,7 +394,7 @@
* the current thread has executed in user mode or system mode.
*
* <p>
- * This is a convenient method for local management use and is
+ * This is a convenience method for local management use and is
* equivalent to calling:
* <blockquote><pre>
* {@link #getThreadCpuTime getThreadCpuTime}(Thread.currentThread().getId());
@@ -403,7 +403,7 @@
* @return the total CPU time for the current thread if CPU time
* measurement is enabled; {@code -1} otherwise.
*
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support CPU time measurement for
* the current thread.
*
@@ -421,7 +421,7 @@
* not necessarily nanoseconds accuracy.
*
* <p>
- * This is a convenient method for local management use and is
+ * This is a convenience method for local management use and is
* equivalent to calling:
* <blockquote><pre>
* {@link #getThreadUserTime getThreadUserTime}(Thread.currentThread().getId());
@@ -430,7 +430,7 @@
* @return the user-level CPU time for the current thread if CPU time
* measurement is enabled; {@code -1} otherwise.
*
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support CPU time measurement for
* the current thread.
*
@@ -467,7 +467,7 @@
* {@code -1} otherwise.
*
* @throws IllegalArgumentException if {@code id <= 0}.
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support CPU time measurement for
* other threads.
*
@@ -502,7 +502,7 @@
* {@code -1} otherwise.
*
* @throws IllegalArgumentException if {@code id <= 0}.
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support CPU time measurement for
* other threads.
*
@@ -548,7 +548,7 @@
* @return {@code true} if thread CPU time measurement is enabled;
* {@code false} otherwise.
*
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine does not support CPU time measurement for other threads
* nor for the current thread.
*
@@ -564,11 +564,11 @@
* @param enable {@code true} to enable;
* {@code false} to disable.
*
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine does not support CPU time measurement for
* any threads nor for the current thread.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("control").
*
@@ -604,7 +604,7 @@
* @return an array of IDs of the threads that are monitor
* deadlocked, if any; {@code null} otherwise.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
*
@@ -616,7 +616,7 @@
* Resets the peak thread count to the current number of
* live threads.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("control").
*
@@ -642,10 +642,10 @@
* deadlocked waiting for object monitors or ownable synchronizers, if any;
* {@code null} otherwise.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine does not support monitoring of ownable synchronizer usage.
*
* @see #isSynchronizerUsageSupported
@@ -704,10 +704,10 @@
* information about a thread whose ID is in the corresponding
* element of the input array of IDs.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
- * @throws java.lang.UnsupportedOperationException
+ * @throws UnsupportedOperationException
* <ul>
* <li>if {@code lockedMonitors} is {@code true} but
* the Java virtual machine does not support monitoring
@@ -794,10 +794,10 @@
* element of the input array of IDs.
*
* @throws IllegalArgumentException if {@code maxDepth} is negative.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
- * @throws java.lang.UnsupportedOperationException
+ * @throws UnsupportedOperationException
* <ul>
* <li>if {@code lockedMonitors} is {@code true} but
* the Java virtual machine does not support monitoring
@@ -835,10 +835,10 @@
*
* @return an array of {@link ThreadInfo} for all live threads.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
- * @throws java.lang.UnsupportedOperationException
+ * @throws UnsupportedOperationException
* <ul>
* <li>if {@code lockedMonitors} is {@code true} but
* the Java virtual machine does not support monitoring
@@ -884,10 +884,10 @@
* @return an array of {@link ThreadInfo} for all live threads.
*
* @throws IllegalArgumentException if {@code maxDepth} is negative.
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("monitor").
- * @throws java.lang.UnsupportedOperationException
+ * @throws UnsupportedOperationException
* <ul>
* <li>if {@code lockedMonitors} is {@code true} but
* the Java virtual machine does not support monitoring
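Editor's note: the javadoc hunks above only shorten the exception references; the ThreadMXBean CPU-time API itself is unchanged. A minimal usage sketch of that API, using only standard java.lang.management calls (class name and output are illustrative):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ThreadCpuTimeDemo {
    public static void main(String[] args) {
        ThreadMXBean tmb = ManagementFactory.getThreadMXBean();

        if (!tmb.isThreadCpuTimeSupported()) {
            System.out.println("CPU time measurement is not supported");
            return;
        }
        if (!tmb.isThreadCpuTimeEnabled()) {
            // May throw SecurityException without ManagementPermission("control").
            tmb.setThreadCpuTimeEnabled(true);
        }

        long id = Thread.currentThread().getId();
        // Both calls return -1 if measurement is disabled, per the javadoc above.
        System.out.println("user CPU time (ns):  " + tmb.getThreadUserTime(id));
        System.out.println("total CPU time (ns): " + tmb.getThreadCpuTime(id));
    }
}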
--- a/src/java.management/share/classes/sun/management/ThreadImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.management/share/classes/sun/management/ThreadImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -29,6 +29,7 @@
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import javax.management.ObjectName;
+import java.util.Objects;
/**
* Implementation for java.lang.management.ThreadMXBean as well as providing the
@@ -112,11 +113,15 @@
return cpuTimeEnabled;
}
- protected boolean isThreadAllocatedMemoryEnabled() {
+ private void ensureThreadAllocatedMemorySupported() {
if (!isThreadAllocatedMemorySupported()) {
throw new UnsupportedOperationException(
- "Thread allocated memory measurement is not supported");
+ "Thread allocated memory measurement is not supported.");
}
+ }
+
+ protected boolean isThreadAllocatedMemoryEnabled() {
+ ensureThreadAllocatedMemorySupported();
return allocatedMemoryEnabled;
}
@@ -155,16 +160,18 @@
return getThreadInfo(ids, 0);
}
+ private void verifyThreadId(long id) {
+ if (id <= 0) {
+ throw new IllegalArgumentException(
+ "Invalid thread ID parameter: " + id);
+ }
+ }
+
private void verifyThreadIds(long[] ids) {
- if (ids == null) {
- throw new NullPointerException("Null ids parameter.");
- }
+ Objects.requireNonNull(ids);
for (int i = 0; i < ids.length; i++) {
- if (ids[i] <= 0) {
- throw new IllegalArgumentException(
- "Invalid thread ID parameter: " + ids[i]);
- }
+ verifyThreadId(ids[i]);
}
}
@@ -342,26 +349,41 @@
}
}
+ protected long getCurrentThreadAllocatedBytes() {
+ if (isThreadAllocatedMemoryEnabled()) {
+ return getThreadAllocatedMemory0(0);
+ }
+ return -1;
+ }
+
+ private boolean verifyThreadAllocatedMemory(long id) {
+ verifyThreadId(id);
+ return isThreadAllocatedMemoryEnabled();
+ }
+
protected long getThreadAllocatedBytes(long id) {
- long[] ids = new long[1];
- ids[0] = id;
- final long[] sizes = getThreadAllocatedBytes(ids);
- return sizes[0];
+ boolean verified = verifyThreadAllocatedMemory(id);
+
+ if (verified) {
+ return getThreadAllocatedMemory0(
+ Thread.currentThread().getId() == id ? 0 : id);
+ }
+ return -1;
}
private boolean verifyThreadAllocatedMemory(long[] ids) {
verifyThreadIds(ids);
-
- // check if Thread allocated memory measurement is supported.
- if (!isThreadAllocatedMemorySupported()) {
- throw new UnsupportedOperationException(
- "Thread allocated memory measurement is not supported.");
- }
-
return isThreadAllocatedMemoryEnabled();
}
protected long[] getThreadAllocatedBytes(long[] ids) {
+ Objects.requireNonNull(ids);
+
+ if (ids.length == 1) {
+ long size = getThreadAllocatedBytes(ids[0]);
+ return new long[] { size };
+ }
+
boolean verified = verifyThreadAllocatedMemory(ids);
long[] sizes = new long[ids.length];
@@ -374,10 +396,7 @@
}
protected void setThreadAllocatedMemoryEnabled(boolean enable) {
- if (!isThreadAllocatedMemorySupported()) {
- throw new UnsupportedOperationException(
- "Thread allocated memory measurement is not supported.");
- }
+ ensureThreadAllocatedMemorySupported();
Util.checkControlAccess();
synchronized (this) {
@@ -511,6 +530,7 @@
private static native void getThreadTotalCpuTime1(long[] ids, long[] result);
private static native long getThreadUserCpuTime0(long id);
private static native void getThreadUserCpuTime1(long[] ids, long[] result);
+ private static native long getThreadAllocatedMemory0(long id);
private static native void getThreadAllocatedMemory1(long[] ids, long[] result);
private static native void setThreadCpuTimeEnabled0(boolean enable);
private static native void setThreadAllocatedMemoryEnabled0(boolean enable);
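Editor's note: the new getThreadAllocatedMemory0 entry point gives single-thread queries their own path instead of routing them through the long[] bulk call. A hedged usage sketch, assuming the platform bean implements com.sun.management.ThreadMXBean (true on HotSpot); the class name and allocation below are illustrative:

import java.lang.management.ManagementFactory;

public class AllocatedBytesDemo {
    public static void main(String[] args) {
        // sun.management.ThreadImpl backs this interface on HotSpot.
        com.sun.management.ThreadMXBean tmb =
                (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();

        if (!tmb.isThreadAllocatedMemorySupported()) {
            System.out.println("Thread allocated memory measurement is not supported");
            return;
        }

        byte[] junk = new byte[1 << 20]; // allocate something measurable
        long self = Thread.currentThread().getId();

        // Single-id query; with the change above it no longer builds a one-element array.
        System.out.println("allocated so far: " + tmb.getThreadAllocatedBytes(self)
                + " bytes (junk.length=" + junk.length + ")");
    }
}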
--- a/src/java.management/share/native/libmanagement/ThreadImpl.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.management/share/native/libmanagement/ThreadImpl.c Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -94,6 +94,13 @@
JNI_FALSE /* user */);
}
+JNIEXPORT jlong JNICALL
+Java_sun_management_ThreadImpl_getThreadAllocatedMemory0
+ (JNIEnv *env, jclass cls, jlong tid)
+{
+ return jmm_interface->GetOneThreadAllocatedMemory(env, tid);
+}
+
JNIEXPORT void JNICALL
Java_sun_management_ThreadImpl_getThreadAllocatedMemory1
(JNIEnv *env, jclass cls, jlongArray ids, jlongArray sizeArray)
--- a/src/java.naming/share/classes/com/sun/jndi/toolkit/ctx/Continuation.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/com/sun/jndi/toolkit/ctx/Continuation.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
/**
* Whether links were encountered.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Object followingLink = null;
/**
@@ -71,6 +72,7 @@
* The last resolved context. Used to set the "AltNameCtx" in a
* CannotProceedException.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Context resolvedContext = null;
/**
--- a/src/java.naming/share/classes/javax/naming/Binding.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/Binding.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
* @see #getObject
* @see #setObject
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object boundObj;
/**
--- a/src/java.naming/share/classes/javax/naming/CannotProceedException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/CannotProceedException.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -127,6 +127,7 @@
* @see #altName
* @see javax.naming.spi.ObjectFactory#getObjectInstance
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Context altNameCtx = null;
/**
--- a/src/java.naming/share/classes/javax/naming/LinkException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/LinkException.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,6 +85,7 @@
* @see #getLinkResolvedObj
* @see #setLinkResolvedObj
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Object linkResolvedObj;
/**
--- a/src/java.naming/share/classes/javax/naming/NamingException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/NamingException.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,6 +79,7 @@
* @see #getResolvedObj
* @see #setResolvedObj
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Object resolvedObj;
/**
* Contains the remaining name that has not been resolved yet.
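Editor's note: these javax.naming hunks all apply the same pattern: a serializable class keeps a field whose declared type is not Serializable, and the "serial" lint warning is suppressed instead of making the field transient. A minimal sketch of the pattern on a hypothetical class (not JDK code):

import java.io.Serializable;

public class ResolvedHolder implements Serializable {
    private static final long serialVersionUID = 1L;

    // Declared as Object, so the "serial" lint cannot prove it is serializable;
    // the values actually stored here are expected to be.
    @SuppressWarnings("serial") // Not statically typed as Serializable
    private Object resolvedObj;

    public void setResolvedObj(Object obj) {
        this.resolvedObj = obj;
    }

    public Object getResolvedObj() {
        return resolvedObj;
    }
}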
--- a/src/java.naming/share/classes/javax/naming/event/NamingEvent.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/event/NamingEvent.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -122,6 +122,7 @@
* Contains information about the change that generated this event.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Object changeInfo;
/**
--- a/src/java.naming/share/classes/javax/naming/spi/ResolveResult.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.naming/share/classes/javax/naming/spi/ResolveResult.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
* Constructors should always initialize this.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected Object resolvedObj;
/**
* Field containing the remaining name yet to be resolved.
--- a/src/java.prefs/share/classes/java/util/prefs/NodeChangeEvent.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.prefs/share/classes/java/util/prefs/NodeChangeEvent.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,10 +46,8 @@
public class NodeChangeEvent extends java.util.EventObject {
/**
* The node that was added or removed.
- *
- * @serial
*/
- private Preferences child;
+ private transient Preferences child;
/**
* Constructs a new {@code NodeChangeEvent} instance.
--- a/src/java.rmi/share/classes/java/rmi/activation/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.rmi/share/classes/java/rmi/activation/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.rmi/share/classes/java/rmi/dgc/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.rmi/share/classes/java/rmi/dgc/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.rmi/share/classes/java/rmi/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.rmi/share/classes/java/rmi/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.rmi/share/classes/java/rmi/registry/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.rmi/share/classes/java/rmi/registry/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.rmi/share/classes/java/rmi/server/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.rmi/share/classes/java/rmi/server/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.security.jgss/macosx/native/libosxkrb5/nativeccache.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/macosx/native/libosxkrb5/nativeccache.c Fri Oct 11 12:08:01 2019 +0530
@@ -43,7 +43,6 @@
* Statics for this module
*/
-static jclass derValueClass = NULL;
static jclass ticketClass = NULL;
static jclass principalNameClass = NULL;
static jclass encryptionKeyClass = NULL;
@@ -54,7 +53,6 @@
static jclass hostAddressClass = NULL;
static jclass hostAddressesClass = NULL;
-static jmethodID derValueConstructor = 0;
static jmethodID ticketConstructor = 0;
static jmethodID principalNameConstructor = 0;
static jmethodID encryptionKeyConstructor = 0;
@@ -108,9 +106,6 @@
principalNameClass = FindClass(env, "sun/security/krb5/PrincipalName");
if (principalNameClass == NULL) return JNI_ERR;
- derValueClass = FindClass(env, "sun/security/util/DerValue");
- if (derValueClass == NULL) return JNI_ERR;
-
encryptionKeyClass = FindClass(env, "sun/security/krb5/EncryptionKey");
if (encryptionKeyClass == NULL) return JNI_ERR;
@@ -132,13 +127,7 @@
hostAddressesClass = FindClass(env,"sun/security/krb5/internal/HostAddresses");
if (hostAddressesClass == NULL) return JNI_ERR;
- derValueConstructor = (*env)->GetMethodID(env, derValueClass, "<init>", "([B)V");
- if (derValueConstructor == 0) {
- printf("Couldn't find DerValue constructor\n");
- return JNI_ERR;
- }
-
- ticketConstructor = (*env)->GetMethodID(env, ticketClass, "<init>", "(Lsun/security/util/DerValue;)V");
+ ticketConstructor = (*env)->GetMethodID(env, ticketClass, "<init>", "([B)V");
if (ticketConstructor == 0) {
printf("Couldn't find Ticket constructor\n");
return JNI_ERR;
@@ -204,9 +193,6 @@
if (ticketClass != NULL) {
(*env)->DeleteWeakGlobalRef(env,ticketClass);
}
- if (derValueClass != NULL) {
- (*env)->DeleteWeakGlobalRef(env,derValueClass);
- }
if (principalNameClass != NULL) {
(*env)->DeleteWeakGlobalRef(env,principalNameClass);
}
@@ -421,11 +407,9 @@
jobject BuildTicket(JNIEnv *env, krb5_data *encodedTicket)
{
- /* To build a Ticket, we first need to build a DerValue out of the EncodedTicket.
- * But before we can do that, we need to make a byte array out of the ET.
- */
+ // To build a Ticket, we need to make a byte array out of the EncodedTicket.
- jobject derValue, ticket;
+ jobject ticket;
jbyteArray ary;
ary = (*env)->NewByteArray(env, encodedTicket->length);
@@ -439,19 +423,12 @@
return (jobject) NULL;
}
- derValue = (*env)->NewObject(env, derValueClass, derValueConstructor, ary);
+ ticket = (*env)->NewObject(env, ticketClass, ticketConstructor, ary);
if ((*env)->ExceptionCheck(env)) {
(*env)->DeleteLocalRef(env, ary);
return (jobject) NULL;
}
-
(*env)->DeleteLocalRef(env, ary);
- ticket = (*env)->NewObject(env, ticketClass, ticketConstructor, derValue);
- if ((*env)->ExceptionCheck(env)) {
- (*env)->DeleteLocalRef(env, derValue);
- return (jobject) NULL;
- }
- (*env)->DeleteLocalRef(env, derValue);
return ticket;
}
--- a/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5Context.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5Context.java Fri Oct 11 12:08:01 2019 +0530
@@ -1391,6 +1391,7 @@
static class KerberosSessionKey implements Key {
private static final long serialVersionUID = 699307378954123869L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final EncryptionKey key;
KerberosSessionKey(EncryptionKey key) {
--- a/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5InitCredential.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5InitCredential.java Fri Oct 11 12:08:01 2019 +0530
@@ -53,7 +53,9 @@
private static final long serialVersionUID = 7723415700837898232L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Krb5NameElement name;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Credentials krb5Credentials;
private Krb5InitCredential(Krb5NameElement name,
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSCredElement.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSCredElement.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,7 @@
}
// Construct delegation cred using the actual context mech and srcName
+ // Warning: called by NativeUtil.c
GSSCredElement(long pCredentials, GSSNameElement srcName, Oid mech)
throws GSSException {
pCred = pCredentials;
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSLibStub.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSLibStub.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@
class GSSLibStub {
private Oid mech;
- private long pMech;
+ private long pMech; // Warning: used by NativeUtil.c
/**
* Initialization routine to dynamically load function pointers.
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSNameElement.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSNameElement.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -97,6 +97,7 @@
printableName = "<DEFAULT ACCEPTOR>";
}
+ // Warning: called by NativeUtil.c
GSSNameElement(long pNativeName, GSSLibStub stub) throws GSSException {
assert(stub != null);
if (pNativeName == 0) {
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/NativeGSSContext.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/NativeGSSContext.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,20 +59,22 @@
private static final int NUM_OF_INQUIRE_VALUES = 6;
+ // Warning: The following 9 fields are used by NativeUtil.c
private long pContext = 0; // Pointer to the gss_ctx_id_t structure
private GSSNameElement srcName;
private GSSNameElement targetName;
- private GSSCredElement cred;
- private GSSCredElement disposeCred;
private boolean isInitiator;
private boolean isEstablished;
+ private GSSCredElement delegatedCred;
+ private int flags;
+ private int lifetime = GSSCredential.DEFAULT_LIFETIME;
private Oid actualMech; // Assigned during context establishment
+ private GSSCredElement cred;
+ private GSSCredElement disposeCred;
+
private ChannelBinding cb;
- private GSSCredElement delegatedCred;
private GSSCredElement disposeDelegatedCred;
- private int flags;
- private int lifetime = GSSCredential.DEFAULT_LIFETIME;
private final GSSLibStub cStub;
private boolean skipDelegPermCheck;
@@ -231,6 +233,7 @@
}
// Constructor for imported context
+ // Warning: called by NativeUtil.c
NativeGSSContext(long pCtxt, GSSLibStub stub) throws GSSException {
assert(pContext != 0);
pContext = pCtxt;
--- a/src/java.security.jgss/share/classes/sun/security/krb5/Credentials.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/Credentials.java Fri Oct 11 12:08:01 2019 +0530
@@ -88,6 +88,7 @@
this.authzData = authzData;
}
+ // Warning: called by NativeCreds.c and nativeccache.c
public Credentials(Ticket new_ticket,
PrincipalName new_client,
PrincipalName new_client_alias,
--- a/src/java.security.jgss/share/classes/sun/security/krb5/EncryptionKey.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/EncryptionKey.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -218,8 +218,8 @@
* credential cache file.
*
*/
- // Used in JSSE (KerberosWrapper), Credentials,
- // javax.security.auth.kerberos.KeyImpl
+ // Used in Credentials, and javax.security.auth.kerberos.KeyImpl
+ // Warning: called by NativeCreds.c and nativeccache.c
public EncryptionKey(int keyType,
byte[] keyValue) {
this(keyValue, keyType, null);
--- a/src/java.security.jgss/share/classes/sun/security/krb5/PrincipalName.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/PrincipalName.java Fri Oct 11 12:08:01 2019 +0530
@@ -158,7 +158,7 @@
this.realmDeduced = false;
}
- // This method is called by Windows NativeCred.c
+ // Warning: called by NativeCreds.c
public PrincipalName(String[] nameParts, String realm) throws RealmException {
this(KRB_NT_UNKNOWN, nameParts, new Realm(realm));
}
@@ -484,6 +484,7 @@
}
}
+ // Warning: called by nativeccache.c
public PrincipalName(String name, int type) throws RealmException {
this(name, type, (String)null);
}
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/HostAddress.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/HostAddress.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -165,6 +165,8 @@
/**
* Creates a HostAddress from the specified address and address type.
*
+ * Warning: called by nativeccache.c.
+ *
* @param new_addrType the value of the address type which matches the defined
* address family constants in the Berkeley Standard
* Distributions of Unix.
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/HostAddresses.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/HostAddresses.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,6 +68,7 @@
private HostAddress[] addresses = null;
private volatile int hashCode = 0;
+ // Warning: called by nativeccache.c
public HostAddresses(HostAddress[] new_addresses) throws IOException {
if (new_addresses != null) {
addresses = new HostAddress[new_addresses.length];
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/KRBError.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/KRBError.java Fri Oct 11 12:08:01 2019 +0530
@@ -79,7 +79,9 @@
* <a href="http://www.ietf.org/rfc/rfc4120.txt">
* http://www.ietf.org/rfc/rfc4120.txt</a>.
*/
-
+// The instance fields not statically typed as Serializable are ASN.1
+// encoded and written by the writeObject method.
+@SuppressWarnings("serial")
public class KRBError implements java.io.Serializable {
static final long serialVersionUID = 3643809337475284503L;
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/KerberosTime.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/KerberosTime.java Fri Oct 11 12:08:01 2019 +0530
@@ -88,8 +88,7 @@
this(time, 0);
}
- // This constructor is used in the native code
- // src/windows/native/sun/security/krb5/NativeCreds.c
+ // Warning: called by NativeCreds.c and nativeccache.c
public KerberosTime(String time) throws Asn1Exception {
this(toKerberosTime(time), 0);
}
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/Krb5.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/Krb5.java Fri Oct 11 12:08:01 2019 +0530
@@ -309,7 +309,7 @@
return errMsgList.get(i);
}
-
+ // Warning: used by NativeCreds.c
public static final boolean DEBUG = GetBooleanAction
.privilegedGetProperty("sun.security.krb5.debug");
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/Ticket.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/Ticket.java Fri Oct 11 12:08:01 2019 +0530
@@ -83,6 +83,7 @@
encPart = new_encPart;
}
+ // Warning: called by NativeCreds.c and nativeccache.c
public Ticket(byte[] data) throws Asn1Exception,
RealmException, KrbApErrException, IOException {
init(new DerValue(data));
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/TicketFlags.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/TicketFlags.java Fri Oct 11 12:08:01 2019 +0530
@@ -67,6 +67,7 @@
}
}
+ // Warning: called by NativeCreds.c and nativeccache.c
public TicketFlags(int size, byte[] data) throws Asn1Exception {
super(size, data);
if ((size > data.length * BITS_PER_UNIT) || (size > Krb5.TKT_OPTS_MAX + 1))
--- a/src/java.security.jgss/windows/native/libw2k_lsa_auth/NativeCreds.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.security.jgss/windows/native/libw2k_lsa_auth/NativeCreds.c Fri Oct 11 12:08:01 2019 +0530
@@ -54,7 +54,6 @@
* Library-wide static references
*/
-jclass derValueClass = NULL;
jclass ticketClass = NULL;
jclass principalNameClass = NULL;
jclass encryptionKeyClass = NULL;
@@ -62,7 +61,6 @@
jclass kerberosTimeClass = NULL;
jclass javaLangStringClass = NULL;
-jmethodID derValueConstructor = 0;
jmethodID ticketConstructor = 0;
jmethodID principalNameConstructor = 0;
jmethodID encryptionKeyConstructor = 0;
@@ -172,24 +170,6 @@
printf("LSA: Made NewWeakGlobalRef\n");
}
- cls = (*env)->FindClass(env,"sun/security/util/DerValue");
-
- if (cls == NULL) {
- printf("LSA: Couldn't find DerValue\n");
- return JNI_ERR;
- }
- if (native_debug) {
- printf("LSA: Found DerValue\n");
- }
-
- derValueClass = (*env)->NewWeakGlobalRef(env,cls);
- if (derValueClass == NULL) {
- return JNI_ERR;
- }
- if (native_debug) {
- printf("LSA: Made NewWeakGlobalRef\n");
- }
-
cls = (*env)->FindClass(env,"sun/security/krb5/EncryptionKey");
if (cls == NULL) {
@@ -262,18 +242,8 @@
printf("LSA: Made NewWeakGlobalRef\n");
}
- derValueConstructor = (*env)->GetMethodID(env, derValueClass,
- "<init>", "([B)V");
- if (derValueConstructor == 0) {
- printf("LSA: Couldn't find DerValue constructor\n");
- return JNI_ERR;
- }
- if (native_debug) {
- printf("LSA: Found DerValue constructor\n");
- }
-
ticketConstructor = (*env)->GetMethodID(env, ticketClass,
- "<init>", "(Lsun/security/util/DerValue;)V");
+ "<init>", "([B)V");
if (ticketConstructor == 0) {
printf("LSA: Couldn't find Ticket constructor\n");
return JNI_ERR;
@@ -347,9 +317,6 @@
if (ticketClass != NULL) {
(*env)->DeleteWeakGlobalRef(env,ticketClass);
}
- if (derValueClass != NULL) {
- (*env)->DeleteWeakGlobalRef(env,derValueClass);
- }
if (principalNameClass != NULL) {
(*env)->DeleteWeakGlobalRef(env,principalNameClass);
}
@@ -897,11 +864,9 @@
jobject BuildTicket(JNIEnv *env, PUCHAR encodedTicket, ULONG encodedTicketSize) {
- /* To build a Ticket, we first need to build a DerValue out of the EncodedTicket.
- * But before we can do that, we need to make a byte array out of the ET.
- */
+ // To build a Ticket, we need to make a byte array out of the EncodedTicket.
- jobject derValue, ticket;
+ jobject ticket;
jbyteArray ary;
ary = (*env)->NewByteArray(env,encodedTicketSize);
@@ -916,19 +881,12 @@
return (jobject) NULL;
}
- derValue = (*env)->NewObject(env, derValueClass, derValueConstructor, ary);
+ ticket = (*env)->NewObject(env, ticketClass, ticketConstructor, ary);
if ((*env)->ExceptionOccurred(env)) {
(*env)->DeleteLocalRef(env, ary);
return (jobject) NULL;
}
-
(*env)->DeleteLocalRef(env, ary);
- ticket = (*env)->NewObject(env, ticketClass, ticketConstructor, derValue);
- if ((*env)->ExceptionOccurred(env)) {
- (*env)->DeleteLocalRef(env, derValue);
- return (jobject) NULL;
- }
- (*env)->DeleteLocalRef(env, derValue);
return ticket;
}
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/BaseRowSet.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/BaseRowSet.java Fri Oct 11 12:08:01 2019 +0530
@@ -330,6 +330,7 @@
* specified in the <code>ResultSet</code> interface.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected java.io.InputStream binaryStream;
/**
@@ -338,6 +339,7 @@
* which is specified in the <code>ResultSet</code> interface.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected java.io.InputStream unicodeStream;
/**
@@ -346,6 +348,7 @@
* which is specified in the <code>ResultSet</code> interface.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected java.io.InputStream asciiStream;
/**
@@ -354,6 +357,7 @@
* which is specified in the <code>ResultSet</code> interface.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
protected java.io.Reader charStream;
/**
@@ -506,6 +510,7 @@
* custom mapping of user-defined types.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Map<String, Class<?>> map;
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialArray.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialArray.java Fri Oct 11 12:08:01 2019 +0530
@@ -66,6 +66,7 @@
* in the SQL <code>ARRAY</code> value.
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object[] elements;
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialBlob.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialBlob.java Fri Oct 11 12:08:01 2019 +0530
@@ -73,6 +73,7 @@
* The internal representation of the <code>Blob</code> object on which this
* <code>SerialBlob</code> object is based.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable; checked in writeObject
private Blob blob;
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialClob.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialClob.java Fri Oct 11 12:08:01 2019 +0530
@@ -68,6 +68,7 @@
* Internal Clob representation if SerialClob is initialized with a
* Clob. Null if SerialClob is initialized with a char[].
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable; checked in writeObject
private Clob clob;
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialJavaObject.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialJavaObject.java Fri Oct 11 12:08:01 2019 +0530
@@ -61,6 +61,7 @@
/**
* Placeholder for object to be serialized.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object obj;
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialRef.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialRef.java Fri Oct 11 12:08:01 2019 +0530
@@ -56,11 +56,13 @@
/**
* This will store the type <code>Ref</code> as an <code>Object</code>.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object object;
/**
* Private copy of the Ref reference.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable; checked in writeObject
private Ref reference;
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialStruct.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/serial/SerialStruct.java Fri Oct 11 12:08:01 2019 +0530
@@ -80,6 +80,7 @@
*
* @serial
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object attribs[];
/**
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/SyncProviderException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/SyncProviderException.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,7 @@
* this <code>SyncProviderException</code> object will return when its
* <code>getSyncResolver</code> method is called.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private SyncResolver syncResolver = null;
/**
--- a/src/java.sql/share/classes/java/sql/SQLClientInfoException.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql/share/classes/java/sql/SQLClientInfoException.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
-
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Map<String, ClientInfoStatus> failedProperties;
/**
--- a/src/java.sql/share/classes/javax/sql/StatementEvent.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql/share/classes/javax/sql/StatementEvent.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
static final long serialVersionUID = -8089573731826608315L;
private SQLException exception;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private PreparedStatement statement;
/**
--- a/src/java.sql/share/classes/javax/sql/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.sql/share/classes/javax/sql/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,23 +1,23 @@
/**
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- * <p>
+ *
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
- * <p>
+ *
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
- * <p>
+ *
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * <p>
+ *
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
--- a/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/dom/CoreDocumentImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/dom/CoreDocumentImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -83,7 +83,7 @@
* @author Andy Clark, IBM
* @author Ralf Pfeiffer, IBM
* @since PR-DOM-Level-1-19980818.
- * @LastModified: Nov 2018
+ * @LastModified: Sept 2019
*/
public class CoreDocumentImpl
extends ParentNode implements Document {
@@ -862,6 +862,9 @@
* the version number of this document.
*/
public void setXmlVersion(String value) {
+ if (value == null) {
+ return;
+ }
if(value.equals("1.0") || value.equals("1.1")){
//we need to change the flag value only --
// when the version set is different than already set.
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/collect/FileSupport.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/collect/FileSupport.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@
try {
String name = path.toAbsolutePath().toString();
name = name.replace('\\', '/');
- return new URI("jar:file:///" + name + "!/");
+ return new URI("jar:file", null, "///" + name + "!/", null);
} catch (URISyntaxException e) {
throw new InternalError(e);
}
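Editor's note: the multi-argument URI constructor quotes characters that are illegal in a URI (spaces in particular), while the single-string form requires an already-encoded string. A small sketch of the difference, using a hypothetical jar path:

import java.net.URI;
import java.net.URISyntaxException;

public class JarUriDemo {
    public static void main(String[] args) throws URISyntaxException {
        String name = "C:/Program Files/my lib/app.jar"; // hypothetical path with spaces

        try {
            // Single-string constructor: the unencoded spaces cause a URISyntaxException.
            new URI("jar:file:///" + name + "!/");
        } catch (URISyntaxException e) {
            System.out.println("rejected: " + e.getMessage());
        }

        // Multi-argument constructor: the path component is quoted for us.
        URI ok = new URI("jar:file", null, "///" + name + "!/", null);
        System.out.println("accepted: " + ok);
    }
}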
--- a/src/jdk.compiler/share/classes/com/sun/tools/doclint/DocLint.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/doclint/DocLint.java Fri Oct 11 12:08:01 2019 +0530
@@ -112,7 +112,7 @@
}
final String code;
- final Object[] args;
+ final transient Object[] args;
}
/**
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Scope.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Scope.java Fri Oct 11 12:08:01 2019 +0530
@@ -389,7 +389,7 @@
Entry e = table[hash];
Assert.check(e == elems, elems.sym);
table[hash] = elems.shadowed;
- elems = elems.sibling;
+ elems = elems.nextSibling;
}
Assert.check(next.shared > 0);
next.shared--;
@@ -466,15 +466,15 @@
}
// remove e from elems and sibling list
- te = elems;
- if (te == e)
- elems = e.sibling;
- else while (true) {
- if (te.sibling == e) {
- te.sibling = e.sibling;
- break;
- }
- te = te.sibling;
+ if (elems == e) {
+ elems = e.nextSibling;
+ if (elems != null)
+ elems.prevSibling = null;
+ } else {
+ Assert.check(e.prevSibling != null, e.sym);
+ e.prevSibling.nextSibling = e.nextSibling;
+ if (e.nextSibling != null)
+ e.nextSibling.prevSibling = e.prevSibling;
}
removeCount++;
@@ -597,7 +597,7 @@
private Symbol doNext() {
Symbol sym = (currEntry == null ? null : currEntry.sym);
if (currEntry != null) {
- currEntry = currEntry.sibling;
+ currEntry = currEntry.nextSibling;
}
update();
return sym;
@@ -617,7 +617,7 @@
void skipToNextMatchingEntry() {
while (currEntry != null && sf != null && !sf.accepts(currEntry.sym)) {
- currEntry = currEntry.sibling;
+ currEntry = currEntry.nextSibling;
}
}
};
@@ -677,7 +677,7 @@
result.append("Scope[");
for (ScopeImpl s = this; s != null ; s = s.next) {
if (s != this) result.append(" | ");
- for (Entry e = s.elems; e != null; e = e.sibling) {
+ for (Entry e = s.elems; e != null; e = e.nextSibling) {
if (e != s.elems) result.append(", ");
result.append(e.sym);
}
@@ -702,18 +702,24 @@
/** Next entry in same scope.
*/
- public Entry sibling;
+ public Entry nextSibling;
+
+ /** Prev entry in same scope.
+ */
+ public Entry prevSibling;
/** The entry's scope.
* scope == null iff this == sentinel
*/
public ScopeImpl scope;
- public Entry(Symbol sym, Entry shadowed, Entry sibling, ScopeImpl scope) {
+ public Entry(Symbol sym, Entry shadowed, Entry nextSibling, ScopeImpl scope) {
this.sym = sym;
this.shadowed = shadowed;
- this.sibling = sibling;
+ this.nextSibling = nextSibling;
this.scope = scope;
+ if (nextSibling != null)
+ nextSibling.prevSibling = this;
}
/** Return next entry with the same name as this entry, proceeding
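Editor's note: the added prevSibling link turns entry removal into a constant-time unlink instead of a walk along the sibling chain from the head. An illustrative sketch of the same unlink logic, not javac code (names mirror the Entry fields above):

class Scope {
    Entry elems; // head of the sibling chain

    void remove(Entry e) {
        if (elems == e) {
            elems = e.nextSibling;
            if (elems != null) {
                elems.prevSibling = null;
            }
        } else {
            e.prevSibling.nextSibling = e.nextSibling;
            if (e.nextSibling != null) {
                e.nextSibling.prevSibling = e.prevSibling;
            }
        }
    }

    static class Entry {
        Entry nextSibling;
        Entry prevSibling;
    }
}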
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java Fri Oct 11 12:08:01 2019 +0530
@@ -2345,14 +2345,14 @@
public static class CompletionFailure extends RuntimeException {
private static final long serialVersionUID = 0;
- public final DeferredCompletionFailureHandler dcfh;
- public Symbol sym;
+ public final transient DeferredCompletionFailureHandler dcfh;
+ public transient Symbol sym;
/** A diagnostic object describing the failure
*/
- private JCDiagnostic diag;
+ private transient JCDiagnostic diag;
- private Supplier<JCDiagnostic> diagSupplier;
+ private transient Supplier<JCDiagnostic> diagSupplier;
public CompletionFailure(Symbol sym, Supplier<JCDiagnostic> diagSupplier, DeferredCompletionFailureHandler dcfh) {
this.dcfh = dcfh;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java Fri Oct 11 12:08:01 2019 +0530
@@ -639,7 +639,7 @@
public static class FunctionDescriptorLookupError extends RuntimeException {
private static final long serialVersionUID = 0;
- JCDiagnostic diagnostic;
+ transient JCDiagnostic diagnostic;
FunctionDescriptorLookupError() {
this.diagnostic = null;
@@ -3745,12 +3745,9 @@
return cl1;
} else if (shouldSkip.test(cl1.head, cl2.head)) {
return union(cl1.tail, cl2.tail, shouldSkip).prepend(cl1.head);
- } else if (cl1.head.tsym.precedes(cl2.head.tsym, this)) {
- return union(cl1.tail, cl2, shouldSkip).prepend(cl1.head);
} else if (cl2.head.tsym.precedes(cl1.head.tsym, this)) {
return union(cl1, cl2.tail, shouldSkip).prepend(cl2.head);
} else {
- // unrelated types
return union(cl1.tail, cl2, shouldSkip).prepend(cl1.head);
}
}
@@ -5002,7 +4999,7 @@
public static class InvalidSignatureException extends RuntimeException {
private static final long serialVersionUID = 0;
- private final Type type;
+ private final transient Type type;
InvalidSignatureException(Type type) {
this.type = type;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Fri Oct 11 12:08:01 2019 +0530
@@ -443,7 +443,7 @@
private static class BreakAttr extends RuntimeException {
static final long serialVersionUID = -6924771130405446405L;
- private Env<AttrContext> env;
+ private transient Env<AttrContext> env;
private BreakAttr(Env<AttrContext> env) {
this.env = env;
}
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/CompileStates.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/CompileStates.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@
private static final long serialVersionUID = 1812267524140424433L;
- protected Context context;
+ protected transient Context context;
public CompileStates(Context context) {
this.context = context;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Infer.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Infer.java Fri Oct 11 12:08:01 2019 +0530
@@ -137,7 +137,7 @@
public static class InferenceException extends InapplicableMethodException {
private static final long serialVersionUID = 0;
- List<JCDiagnostic> messages = List.nil();
+ transient List<JCDiagnostic> messages = List.nil();
InferenceException() {
super(null);
@@ -1321,7 +1321,7 @@
public static class NodeNotFoundException extends RuntimeException {
private static final long serialVersionUID = 0;
- InferenceGraph graph;
+ transient InferenceGraph graph;
public NodeNotFoundException(InferenceGraph graph) {
this.graph = graph;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1387,7 +1387,7 @@
public static class InapplicableMethodException extends RuntimeException {
private static final long serialVersionUID = 0;
- JCDiagnostic diagnostic;
+ transient JCDiagnostic diagnostic;
InapplicableMethodException(JCDiagnostic diag) {
this.diagnostic = diag;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/file/Locations.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/file/Locations.java Fri Oct 11 12:08:01 2019 +0530
@@ -233,7 +233,7 @@
}
public void setMultiReleaseValue(String multiReleaseValue) {
- fsEnv = Collections.singletonMap("multi-release", multiReleaseValue);
+ fsEnv = Collections.singletonMap("releaseVersion", multiReleaseValue);
}
private boolean contains(Collection<Path> searchPath, Path file) throws IOException {
@@ -280,7 +280,7 @@
private static final long serialVersionUID = 0;
private boolean expandJarClassPaths = false;
- private final Set<Path> canonicalValues = new HashSet<>();
+ private final transient Set<Path> canonicalValues = new HashSet<>();
public SearchPath expandJarClassPaths(boolean x) {
expandJarClassPaths = x;
@@ -290,7 +290,7 @@
/**
* What to use when path element is the empty string
*/
- private Path emptyPathDefault = null;
+ private transient Path emptyPathDefault = null;
public SearchPath emptyPathDefault(Path x) {
emptyPathDefault = x;
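Editor's note: the env key passed to the zip file system changes from "multi-release" to "releaseVersion", the property name the JAR file system uses to select versioned entries of a multi-release jar. A hedged sketch of opening such a jar with that property (the jar path and release value are hypothetical):

import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.util.Map;

public class MultiReleaseJarDemo {
    public static void main(String[] args) throws Exception {
        URI jarUri = URI.create("jar:file:///tmp/mr.jar"); // hypothetical jar

        // "releaseVersion" tells the zip file system which versioned entries to expose.
        try (FileSystem fs = FileSystems.newFileSystem(jarUri, Map.of("releaseVersion", "14"))) {
            fs.getRootDirectories().forEach(System.out::println);
        }
    }
}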
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java Fri Oct 11 12:08:01 2019 +0530
@@ -264,7 +264,9 @@
module_prefix + "java.lang.annotation.Native",
module_prefix + "java.lang.annotation.Repeatable",
module_prefix + "java.lang.annotation.Retention",
- module_prefix + "java.lang.annotation.Target");
+ module_prefix + "java.lang.annotation.Target",
+
+ module_prefix + "java.io.Serial");
}
private void initProcessorLoader() {
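Editor's note: java.io.Serial (new in JDK 14) joins the list of platform annotations here; it marks serialization-related members so serialization-aware tooling can check their signatures. A minimal usage sketch on a hypothetical class:

import java.io.Serial;
import java.io.Serializable;

public class Point implements Serializable {

    // @Serial documents that this field is part of the serialization contract
    // and lets serialization-aware checks verify its modifiers and type.
    @Serial
    private static final long serialVersionUID = 1L;

    private final int x;
    private final int y;

    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }
}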
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java Fri Oct 11 12:08:01 2019 +0530
@@ -378,9 +378,6 @@
long p11KeyID = p11Key.getKeyID();
try {
- if (session == null) {
- session = token.getOpSession();
- }
CK_MECHANISM mechWithParams;
switch (blockMode) {
case MODE_GCM:
@@ -390,6 +387,9 @@
default:
throw new ProviderException("Unsupported mode: " + blockMode);
}
+ if (session == null) {
+ session = token.getOpSession();
+ }
if (encrypt) {
token.p11.C_EncryptInit(session.id(), mechWithParams,
p11KeyID);
@@ -398,7 +398,6 @@
p11KeyID);
}
} catch (PKCS11Exception e) {
- //e.printStackTrace();
p11Key.releaseKeyID();
session = token.releaseSession(session);
throw e;
@@ -718,7 +717,9 @@
errorCode == CKR_ENCRYPTED_DATA_LEN_RANGE) {
throw (IllegalBlockSizeException)
(new IllegalBlockSizeException(e.toString()).initCause(e));
- } else if (errorCode == CKR_ENCRYPTED_DATA_INVALID) {
+ } else if (errorCode == CKR_ENCRYPTED_DATA_INVALID ||
+ // Solaris-specific
+ errorCode == CKR_GENERAL_ERROR) {
throw (BadPaddingException)
(new BadPaddingException(e.toString()).initCause(e));
}
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Digest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Digest.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,9 +103,11 @@
digestLength = 20;
break;
case (int)CKM_SHA224:
+ case (int)CKM_SHA512_224:
digestLength = 28;
break;
case (int)CKM_SHA256:
+ case (int)CKM_SHA512_256:
digestLength = 32;
break;
case (int)CKM_SHA384:
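
Editor's note: the new CKM_SHA512_224 and CKM_SHA512_256 cases (and the matching HMAC cases in P11Mac.java below) wire the truncated SHA-512 variants to their 28- and 32-byte output lengths. A short sketch of requesting them through a configured SunPKCS11 provider; the config path is a placeholder and the token is assumed to expose these mechanisms:

    import java.security.MessageDigest;
    import java.security.Provider;
    import java.security.Security;

    public class TruncatedSha512Demo {
        public static void main(String[] args) throws Exception {
            // Hypothetical config path; requires a token supporting CKM_SHA512_224/CKM_SHA512_256.
            Provider p = Security.getProvider("SunPKCS11").configure("/path/to/pkcs11.cfg");

            MessageDigest md224 = MessageDigest.getInstance("SHA-512/224", p);
            MessageDigest md256 = MessageDigest.getInstance("SHA-512/256", p);

            System.out.println(md224.digest("hello".getBytes()).length); // 28
            System.out.println(md256.digest("hello".getBytes()).length); // 32
        }
    }
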
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,9 +91,11 @@
macLength = 20;
break;
case (int)CKM_SHA224_HMAC:
+ case (int)CKM_SHA512_224_HMAC:
macLength = 28;
break;
case (int)CKM_SHA256_HMAC:
+ case (int)CKM_SHA512_256_HMAC:
macLength = 32;
break;
case (int)CKM_SHA384_HMAC:
--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_convert.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_convert.c Fri Oct 11 12:08:01 2019 +0530
@@ -721,7 +721,7 @@
}
// populate using java values
- ckParamPtr->prfMechanism = jLongToCKULong(jPrfMechanism);
+ ckParamPtr->prfHashMechanism = jLongToCKULong(jPrfMechanism);
ckParamPtr->ulMacLength = jLongToCKULong(jUlMacLength);
ckParamPtr->ulServerOrClient = jLongToCKULong(jUlServerOrClient);
@@ -1014,17 +1014,18 @@
}
/*
- * converts the Java CK_GCM_PARAMS object to a CK_GCM_PARAMS pointer
+ * converts the Java CK_GCM_PARAMS object to a CK_GCM_PARAMS_NO_IVBITS pointer
+ * Note: Need to try NSS definition first to avoid SIGSEGV.
*
* @param env - used to call JNI functions to get the Java classes and objects
* @param jParam - the Java CK_GCM_PARAMS object to convert
* @param pLength - length of the allocated memory of the returned pointer
- * @return pointer to the new CK_GCM_PARAMS structure
+ * @return pointer to the new CK_GCM_PARAMS_NO_IVBITS structure
*/
-CK_GCM_PARAMS_PTR
+CK_GCM_PARAMS_NO_IVBITS_PTR
jGCMParamsToCKGCMParamPtr(JNIEnv *env, jobject jParam, CK_ULONG *pLength)
{
- CK_GCM_PARAMS_PTR ckParamPtr;
+ CK_GCM_PARAMS_NO_IVBITS_PTR ckParamPtr;
jclass jGcmParamsClass;
jfieldID fieldID;
jobject jIv, jAad;
@@ -1052,8 +1053,8 @@
if (fieldID == NULL) { return NULL; }
jTagLen = (*env)->GetLongField(env, jParam, fieldID);
- // allocate memory for CK_GCM_PARAMS pointer
- ckParamPtr = calloc(1, sizeof(CK_GCM_PARAMS));
+ // allocate memory for CK_GCM_PARAMS_NO_IVBITS pointer
+ ckParamPtr = calloc(1, sizeof(CK_GCM_PARAMS_NO_IVBITS));
if (ckParamPtr == NULL) {
throwOutOfMemoryError(env, 0);
return NULL;
@@ -1073,16 +1074,15 @@
ckParamPtr->ulTagBits = jLongToCKULong(jTagLen);
if (pLength != NULL) {
- *pLength = sizeof(CK_GCM_PARAMS);
+ *pLength = sizeof(CK_GCM_PARAMS_NO_IVBITS);
}
- TRACE1("Created inner GCM_PARAMS PTR %lX\n", ptr_to_jlong(ckParamPtr));
+ TRACE1("Created inner GCM_PARAMS PTR w/o ulIvBits %p\n", ckParamPtr);
return ckParamPtr;
cleanup:
free(ckParamPtr->pIv);
free(ckParamPtr->pAAD);
free(ckParamPtr);
return NULL;
-
}
/*
@@ -1179,7 +1179,7 @@
throwOutOfMemoryError(env, 0);
return NULL;
}
- TRACE1("DEBUG jMechanismToCKMechanismPtr: allocated mech %p \n", ckpMech);
+ TRACE1("DEBUG jMechanismToCKMechanismPtr: allocated mech %p\n", ckpMech);
ckpMech->mechanism = jLongToCKULong(jMechType);
--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_crypt.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_crypt.c Fri Oct 11 12:08:01 2019 +0530
@@ -72,6 +72,7 @@
{
CK_SESSION_HANDLE ckSessionHandle;
CK_MECHANISM_PTR ckpMechanism = NULL;
+ CK_MECHANISM_PTR ckpTemp;
CK_OBJECT_HANDLE ckKeyHandle;
CK_RV rv;
@@ -81,15 +82,32 @@
ckSessionHandle = jLongToCKULong(jSessionHandle);
ckKeyHandle = jLongToCKULong(jKeyHandle);
ckpMechanism = jMechanismToCKMechanismPtr(env, jMechanism);
+ TRACE1("DEBUG C_EncryptInit: created pMech = %p\n",
+ ckpMechanism);
+
if ((*env)->ExceptionCheck(env)) { return; }
rv = (*ckpFunctions->C_EncryptInit)(ckSessionHandle, ckpMechanism,
ckKeyHandle);
- // if OAEP, then cannot free here
+ if (ckpMechanism->mechanism == CKM_AES_GCM) {
+ if (rv == CKR_ARGUMENTS_BAD || rv == CKR_MECHANISM_PARAM_INVALID) {
+ // retry with CKM_GCM_PARAMS structure in pkcs11t.h
+ TRACE0("DEBUG C_EncryptInit: retry with CK_GCM_PARAMS\n");
+ ckpTemp = updateGCMParams(env, ckpMechanism);
+ if (ckpTemp != NULL) { // only re-call if conversion succeeds
+ ckpMechanism = ckpTemp;
+ rv = (*ckpFunctions->C_EncryptInit)(ckSessionHandle, ckpMechanism,
+ ckKeyHandle);
+ }
+ }
+ }
+
+ TRACE1("DEBUG C_EncryptInit: freed pMech = %p\n", ckpMechanism);
freeCKMechanismPtr(ckpMechanism);
+ if (ckAssertReturnValueOK(env, rv) != CK_ASSERT_OK) { return; }
- if (ckAssertReturnValueOK(env, rv) != CK_ASSERT_OK) { return; }
+ TRACE0("FINISHED\n");
}
#endif
@@ -292,6 +310,7 @@
{
CK_SESSION_HANDLE ckSessionHandle;
CK_MECHANISM_PTR ckpMechanism = NULL;
+ CK_MECHANISM_PTR ckpTemp;
CK_OBJECT_HANDLE ckKeyHandle;
CK_RV rv;
@@ -301,15 +320,32 @@
ckSessionHandle = jLongToCKULong(jSessionHandle);
ckKeyHandle = jLongToCKULong(jKeyHandle);
ckpMechanism = jMechanismToCKMechanismPtr(env, jMechanism);
+ TRACE1("DEBUG C_DecryptInit: created pMech = %p\n",
+ ckpMechanism);
+
if ((*env)->ExceptionCheck(env)) { return; }
rv = (*ckpFunctions->C_DecryptInit)(ckSessionHandle, ckpMechanism,
ckKeyHandle);
- // if OAEP, then cannot free here
+ if (ckpMechanism->mechanism == CKM_AES_GCM) {
+ if (rv == CKR_ARGUMENTS_BAD || rv == CKR_MECHANISM_PARAM_INVALID) {
+ // retry with CKM_GCM_PARAMS structure in pkcs11t.h
+ TRACE0("DEBUG C_DecryptInit: retry with CK_GCM_PARAMS\n");
+ ckpTemp = updateGCMParams(env, ckpMechanism);
+ if (ckpTemp != NULL) { // only re-call if conversion succeeds
+ ckpMechanism = ckpTemp;
+ rv = (*ckpFunctions->C_DecryptInit)(ckSessionHandle, ckpMechanism,
+ ckKeyHandle);
+ }
+ }
+ }
+
+ TRACE1("DEBUG C_DecryptInit: freed pMech = %p\n", ckpMechanism);
freeCKMechanismPtr(ckpMechanism);
+ if (ckAssertReturnValueOK(env, rv) != CK_ASSERT_OK) { return; }
- if (ckAssertReturnValueOK(env, rv) != CK_ASSERT_OK) { return; }
+ TRACE0("FINISHED\n");
}
#endif
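
Editor's note: both hunks above implement the same fallback: try the spec-style CK_GCM_PARAMS_NO_IVBITS layout first (the NSS definition, avoiding the SIGSEGV noted in p11_convert.c), and only on CKR_ARGUMENTS_BAD or CKR_MECHANISM_PARAM_INVALID rewrite the parameters with updateGCMParams and retry once. A condensed sketch of that control flow, assuming pkcs11wrapper.h (and thus the PKCS#11 headers) is included; initOp is an illustrative stand-in for C_EncryptInit/C_DecryptInit, which share one function-pointer signature:

    /* Sketch only; not part of the patch. Assumes pkcs11wrapper.h is included. */
    static CK_RV initWithGCMFallback(JNIEnv *env, CK_C_EncryptInit initOp,
                                     CK_SESSION_HANDLE hSession,
                                     CK_MECHANISM_PTR ckpMechanism,
                                     CK_OBJECT_HANDLE hKey)
    {
        /* First attempt uses the CK_GCM_PARAMS_NO_IVBITS layout produced by
         * jGCMParamsToCKGCMParamPtr(). */
        CK_RV rv = (*initOp)(hSession, ckpMechanism, hKey);

        if (ckpMechanism->mechanism == CKM_AES_GCM &&
            (rv == CKR_ARGUMENTS_BAD || rv == CKR_MECHANISM_PARAM_INVALID)) {
            /* The token rejected the 5-field layout: convert the parameters to the
             * header-style CK_GCM_PARAMS (ulIvBits included) and retry once. */
            CK_MECHANISM_PTR converted = updateGCMParams(env, ckpMechanism);
            if (converted != NULL) {
                rv = (*initOp)(hSession, converted, hKey);
            }
        }
        return rv;  /* the caller still frees the mechanism with freeCKMechanismPtr() */
    }
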
--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_util.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/p11_util.c Fri Oct 11 12:08:01 2019 +0530
@@ -302,29 +302,30 @@
CK_TLS12_KEY_MAT_PARAMS* tlsKmTmp;
if (mechPtr != NULL) {
- TRACE2("DEBUG: free mech %lX (mech id = 0x%lX)\n",
- ptr_to_jlong(mechPtr), mechPtr->mechanism);
+ TRACE2("DEBUG freeCKMechanismPtr: free pMech %p (mech 0x%lX)\n",
+ mechPtr, mechPtr->mechanism);
if (mechPtr->pParameter != NULL) {
+ tmp = mechPtr->pParameter;
switch (mechPtr->mechanism) {
case CKM_AES_GCM:
- tmp = mechPtr->pParameter;
- TRACE1("\t=> free GCM_PARAMS %lX\n",
- ptr_to_jlong(tmp));
- free(((CK_GCM_PARAMS*)tmp)->pIv);
- free(((CK_GCM_PARAMS*)tmp)->pAAD);
+ if (mechPtr->ulParameterLen == sizeof(CK_GCM_PARAMS_NO_IVBITS)) {
+ TRACE0("[ GCM_PARAMS w/o ulIvBits ]\n");
+ free(((CK_GCM_PARAMS_NO_IVBITS*)tmp)->pIv);
+ free(((CK_GCM_PARAMS_NO_IVBITS*)tmp)->pAAD);
+ } else if (mechPtr->ulParameterLen == sizeof(CK_GCM_PARAMS)) {
+ TRACE0("[ GCM_PARAMS ]\n");
+ free(((CK_GCM_PARAMS*)tmp)->pIv);
+ free(((CK_GCM_PARAMS*)tmp)->pAAD);
+ }
break;
case CKM_AES_CCM:
- tmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_CCM_PARAMS %lX\n",
- ptr_to_jlong(tmp));
+ TRACE0("[ CK_CCM_PARAMS ]\n");
free(((CK_CCM_PARAMS*)tmp)->pNonce);
free(((CK_CCM_PARAMS*)tmp)->pAAD);
break;
case CKM_TLS_PRF:
case CKM_NSS_TLS_PRF_GENERAL:
- tmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_TLS_PRF_PARAMS %lX\n",
- ptr_to_jlong(tmp));
+ TRACE0("[ CK_TLS_PRF_PARAMS ]\n");
free(((CK_TLS_PRF_PARAMS*)tmp)->pSeed);
free(((CK_TLS_PRF_PARAMS*)tmp)->pLabel);
free(((CK_TLS_PRF_PARAMS*)tmp)->pulOutputLen);
@@ -334,18 +335,16 @@
case CKM_TLS_MASTER_KEY_DERIVE:
case CKM_SSL3_MASTER_KEY_DERIVE_DH:
case CKM_TLS_MASTER_KEY_DERIVE_DH:
- sslMkdTmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_SSL3_MASTER_KEY_DERIVE_PARAMS %lX\n",
- ptr_to_jlong(sslMkdTmp));
+ sslMkdTmp = tmp;
+ TRACE0("[ CK_SSL3_MASTER_KEY_DERIVE_PARAMS ]\n");
free(sslMkdTmp->RandomInfo.pClientRandom);
free(sslMkdTmp->RandomInfo.pServerRandom);
free(sslMkdTmp->pVersion);
break;
case CKM_SSL3_KEY_AND_MAC_DERIVE:
case CKM_TLS_KEY_AND_MAC_DERIVE:
- sslKmTmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_SSL3_KEY_MAT_PARAMS %lX\n",
- ptr_to_jlong(sslKmTmp));
+ sslKmTmp = tmp;
+ TRACE0("[ CK_SSL3_KEY_MAT_PARAMS ]\n");
free(sslKmTmp->RandomInfo.pClientRandom);
free(sslKmTmp->RandomInfo.pServerRandom);
if (sslKmTmp->pReturnedKeyMaterial != NULL) {
@@ -356,17 +355,15 @@
break;
case CKM_TLS12_MASTER_KEY_DERIVE:
case CKM_TLS12_MASTER_KEY_DERIVE_DH:
- tlsMkdTmp = mechPtr->pParameter;
- TRACE1("\t=> CK_TLS12_MASTER_KEY_DERIVE_PARAMS %lX\n",
- ptr_to_jlong(tlsMkdTmp));
+ tlsMkdTmp = tmp;
+ TRACE0("[ CK_TLS12_MASTER_KEY_DERIVE_PARAMS ]\n");
free(tlsMkdTmp->RandomInfo.pClientRandom);
free(tlsMkdTmp->RandomInfo.pServerRandom);
free(tlsMkdTmp->pVersion);
break;
case CKM_TLS12_KEY_AND_MAC_DERIVE:
- tlsKmTmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_TLS12_KEY_MAT_PARAMS %lX\n",
- ptr_to_jlong(tlsKmTmp));
+ tlsKmTmp = tmp;
+ TRACE0("[ CK_TLS12_KEY_MAT_PARAMS ]\n");
free(tlsKmTmp->RandomInfo.pClientRandom);
free(tlsKmTmp->RandomInfo.pServerRandom);
if (tlsKmTmp->pReturnedKeyMaterial != NULL) {
@@ -377,9 +374,7 @@
break;
case CKM_ECDH1_DERIVE:
case CKM_ECDH1_COFACTOR_DERIVE:
- tmp = mechPtr->pParameter;
- TRACE1("\t=> free CK_ECDH1_DERIVE_PARAMS %lX\n",
- ptr_to_jlong(tmp));
+ TRACE0("[ CK_ECDH1_DERIVE_PARAMS ]\n");
free(((CK_ECDH1_DERIVE_PARAMS *)tmp)->pSharedData);
free(((CK_ECDH1_DERIVE_PARAMS *)tmp)->pPublicData);
break;
@@ -387,7 +382,6 @@
case CKM_AES_CTR:
case CKM_RSA_PKCS_PSS:
case CKM_CAMELLIA_CTR:
- TRACE0("\t=> NO OP\n");
// params do not contain pointers
break;
default:
@@ -399,17 +393,59 @@
// CKM_EXTRACT_KEY_FROM_KEY, CKM_OTP, CKM_KIP,
// CKM_DSA_PARAMETER_GEN?, CKM_GOSTR3410_*
// CK_any_CBC_ENCRYPT_DATA?
- TRACE0("\t=> ERROR UNSUPPORTED CK PARAMS\n");
+ TRACE0("ERROR: UNSUPPORTED CK_MECHANISM\n");
break;
}
- free(mechPtr->pParameter);
+ TRACE1("\t=> freed param %p\n", tmp);
+ free(tmp);
} else {
- TRACE0("DEBUG => Parameter NULL\n");
+ TRACE0("\t=> param NULL\n");
}
free(mechPtr);
+ TRACE0("FINISHED\n");
}
}
+/* This function replaces the CK_GCM_PARAMS_NO_IVBITS structure associated
+ * with the specified CK_MECHANISM structure with a CK_GCM_PARAMS
+ * structure.
+ *
+ * @param mechPtr pointer to the CK_MECHANISM structure containing
+ * the to-be-converted CK_GCM_PARAMS_NO_IVBITS structure.
+ * @return pointer to the CK_MECHANISM structure containing the
+ * converted CK_GCM_PARAMS structure or NULL if no conversion took place.
+ */
+CK_MECHANISM_PTR updateGCMParams(JNIEnv *env, CK_MECHANISM_PTR mechPtr) {
+ CK_GCM_PARAMS* pGcmParams2 = NULL;
+ CK_GCM_PARAMS_NO_IVBITS* pParams = NULL;
+ if ((mechPtr->mechanism == CKM_AES_GCM) &&
+ (mechPtr->pParameter != NULL_PTR) &&
+ (mechPtr->ulParameterLen == sizeof(CK_GCM_PARAMS_NO_IVBITS))) {
+ pGcmParams2 = calloc(1, sizeof(CK_GCM_PARAMS));
+ if (pGcmParams2 == NULL) {
+ throwOutOfMemoryError(env, 0);
+ return NULL;
+ }
+ pParams = (CK_GCM_PARAMS_NO_IVBITS*) mechPtr->pParameter;
+ pGcmParams2->pIv = pParams->pIv;
+ pGcmParams2->ulIvLen = pParams->ulIvLen;
+ pGcmParams2->ulIvBits = (pGcmParams2->ulIvLen << 3);
+ pGcmParams2->pAAD = pParams->pAAD;
+ pGcmParams2->ulAADLen = pParams->ulAADLen;
+ pGcmParams2->ulTagBits = pParams->ulTagBits;
+ TRACE1("DEBUG updateGCMParams: pMech %p\n", mechPtr);
+ TRACE2("\t=> GCM param w/o ulIvBits %p => GCM param %p\n", pParams,
+ pGcmParams2);
+ free(pParams);
+ mechPtr->pParameter = pGcmParams2;
+ mechPtr->ulParameterLen = sizeof(CK_GCM_PARAMS);
+ return mechPtr;
+ } else {
+ TRACE0("DEBUG updateGCMParams: no conversion done\n");
+ }
+ return NULL;
+}
+
/*
* the following functions convert Java arrays to PKCS#11 array pointers and
* their array length and vice versa
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11gcm2.h Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* There is a known incompatibility for CK_GCM_PARAMS structure.
+ * PKCS#11 v2.40 standard mechanisms specification specifies
+ * CK_GCM_PARAMS as
+ * typedef struct CK_GCM_PARAMS {
+ * CK_BYTE_PTR pIv;
+ * CK_ULONG ulIvLen;
+ * CK_BYTE_PTR pAAD;
+ * CK_ULONG ulAADLen;
+ * CK_ULONG ulTagBits;
+ * } CK_GCM_PARAMS;
+ * However, the official header file of PKCS#11 v2.40 defines the
+ * CK_GCM_PARAMS with an extra "ulIvBits" field (type CK_ULONG).
+ * NSS uses the spec version while Solaris and SoftHSM2 use the header
+ * version. In order to work with both sides, the SunPKCS11 provider defines
+ * the spec version of CK_GCM_PARAMS as CK_GCM_PARAMS_NO_IVBITS (as in this
+ * file) and uses it first before failing over to the header version.
+ */
+#ifndef _PKCS11GCM2_H_
+#define _PKCS11GCM2_H_ 1
+
+/* the spec-style CK_GCM_PARAMS (PKCS#11 v2.40 mechanisms document), i.e. without ulIvBits */
+typedef struct CK_GCM_PARAMS_NO_IVBITS {
+ CK_BYTE_PTR pIv;
+ CK_ULONG ulIvLen;
+ CK_BYTE_PTR pAAD;
+ CK_ULONG ulAADLen;
+ CK_ULONG ulTagBits;
+} CK_GCM_PARAMS_NO_IVBITS;
+
+typedef CK_GCM_PARAMS_NO_IVBITS CK_PTR CK_GCM_PARAMS_NO_IVBITS_PTR;
+
+#endif /* _PKCS11GCM2_H_ */
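
Editor's note: the incompatibility described in this header is purely a layout difference: the header-style struct carries one extra CK_ULONG (ulIvBits) in the middle, so every field after ulIvLen lands at a different offset and the two structs differ in size. A self-contained illustration with stand-in typedefs (the real definitions come from pkcs11t.h and this header):

    #include <stdio.h>

    /* Stand-ins for the PKCS#11 base types, for illustration only. */
    typedef unsigned char  CK_BYTE;
    typedef CK_BYTE       *CK_BYTE_PTR;
    typedef unsigned long  CK_ULONG;

    /* Spec / NSS layout (what pkcs11gcm2.h calls CK_GCM_PARAMS_NO_IVBITS). */
    typedef struct {
        CK_BYTE_PTR pIv;   CK_ULONG ulIvLen;
        CK_BYTE_PTR pAAD;  CK_ULONG ulAADLen;
        CK_ULONG    ulTagBits;
    } SPEC_GCM_PARAMS;

    /* Official-header layout (what pkcs11t.h now declares, with ulIvBits). */
    typedef struct {
        CK_BYTE_PTR pIv;   CK_ULONG ulIvLen;  CK_ULONG ulIvBits;
        CK_BYTE_PTR pAAD;  CK_ULONG ulAADLen;
        CK_ULONG    ulTagBits;
    } HDR_GCM_PARAMS;

    int main(void) {
        /* On a typical LP64 build this prints 40 vs 48 bytes: handing the larger
         * layout to a library expecting the spec layout makes it read ulIvBits
         * where it expects the pAAD pointer, which is the crash the provider
         * avoids by trying CK_GCM_PARAMS_NO_IVBITS first. */
        printf("spec layout: %zu bytes, header layout: %zu bytes\n",
               sizeof(SPEC_GCM_PARAMS), sizeof(HDR_GCM_PARAMS));
        return 0;
    }
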
--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11t.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11t.h Fri Oct 11 12:08:01 2019 +0530
@@ -1833,6 +1833,7 @@
typedef struct CK_GCM_PARAMS {
CK_BYTE_PTR pIv;
CK_ULONG ulIvLen;
+ CK_ULONG ulIvBits;
CK_BYTE_PTR pAAD;
CK_ULONG ulAADLen;
CK_ULONG ulTagBits;
@@ -1962,7 +1963,7 @@
typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR;
typedef struct CK_TLS_MAC_PARAMS {
- CK_MECHANISM_TYPE prfMechanism;
+ CK_MECHANISM_TYPE prfHashMechanism;
CK_ULONG ulMacLength;
CK_ULONG ulServerOrClient;
} CK_TLS_MAC_PARAMS;
@@ -2000,3 +2001,4 @@
#endif /* _PKCS11T_H_ */
+
--- a/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11wrapper.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/share/native/libj2pkcs11/pkcs11wrapper.h Fri Oct 11 12:08:01 2019 +0530
@@ -159,7 +159,6 @@
/* include the platform dependent part of the header */
#include "p11_md.h"
-#include "pkcs11.h"
#include <jni.h>
#include <jni_util.h>
#include <stdarg.h>
@@ -296,6 +295,10 @@
#define CLASS_TLS_PRF_PARAMS "sun/security/pkcs11/wrapper/CK_TLS_PRF_PARAMS"
#define CLASS_TLS_MAC_PARAMS "sun/security/pkcs11/wrapper/CK_TLS_MAC_PARAMS"
+/* function to update the spec-style CK_GCM_PARAMS_NO_IVBITS parameters in the
+ * mechanism pointer with the header-style CK_GCM_PARAMS
+ */
+CK_MECHANISM_PTR updateGCMParams(JNIEnv *env, CK_MECHANISM_PTR mechPtr);
/* function to convert a PKCS#11 return value other than CK_OK into a Java Exception
* or to throw a PKCS11RuntimeException
--- a/src/jdk.crypto.cryptoki/unix/native/libj2pkcs11/p11_md.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/unix/native/libj2pkcs11/p11_md.h Fri Oct 11 12:08:01 2019 +0530
@@ -1,3 +1,7 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
@@ -69,6 +73,7 @@
#endif
#include "pkcs11.h"
+#include "pkcs11gcm2.h"
#include "jni.h"
--- a/src/jdk.crypto.cryptoki/windows/native/libj2pkcs11/p11_md.h Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.crypto.cryptoki/windows/native/libj2pkcs11/p11_md.h Fri Oct 11 12:08:01 2019 +0530
@@ -1,3 +1,7 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
@@ -77,6 +81,7 @@
#endif /* CreateMutex */
#include "pkcs11.h"
+#include "pkcs11gcm2.h"
/* statement according to PKCS11 docu */
#pragma pack(pop, cryptoki)
--- a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_core.c Fri Oct 11 12:08:01 2019 +0530
@@ -31,336 +31,14 @@
#include <elf.h>
#include <link.h>
#include "libproc_impl.h"
+#include "ps_core_common.h"
#include "proc_service.h"
#include "salibelf.h"
-#include "cds.h"
// This file has the libproc implementation to read core files.
// For live processes, refer to ps_proc.c. Portions of this is adapted
// /modelled after Solaris libproc.so (in particular Pcore.c)
-//----------------------------------------------------------------------
-// ps_prochandle cleanup helper functions
-
-// close all file descriptors
-static void close_files(struct ps_prochandle* ph) {
- lib_info* lib = NULL;
-
- // close core file descriptor
- if (ph->core->core_fd >= 0)
- close(ph->core->core_fd);
-
- // close exec file descriptor
- if (ph->core->exec_fd >= 0)
- close(ph->core->exec_fd);
-
- // close interp file descriptor
- if (ph->core->interp_fd >= 0)
- close(ph->core->interp_fd);
-
- // close class share archive file
- if (ph->core->classes_jsa_fd >= 0)
- close(ph->core->classes_jsa_fd);
-
- // close all library file descriptors
- lib = ph->libs;
- while (lib) {
- int fd = lib->fd;
- if (fd >= 0 && fd != ph->core->exec_fd) {
- close(fd);
- }
- lib = lib->next;
- }
-}
-
-// clean all map_info stuff
-static void destroy_map_info(struct ps_prochandle* ph) {
- map_info* map = ph->core->maps;
- while (map) {
- map_info* next = map->next;
- free(map);
- map = next;
- }
-
- if (ph->core->map_array) {
- free(ph->core->map_array);
- }
-
- // Part of the class sharing workaround
- map = ph->core->class_share_maps;
- while (map) {
- map_info* next = map->next;
- free(map);
- map = next;
- }
-}
-
-// ps_prochandle operations
-static void core_release(struct ps_prochandle* ph) {
- if (ph->core) {
- close_files(ph);
- destroy_map_info(ph);
- free(ph->core);
- }
-}
-
-static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
- print_debug("can't allocate memory for map_info\n");
- return NULL;
- }
-
- // initialize map
- map->fd = fd;
- map->offset = offset;
- map->vaddr = vaddr;
- map->memsz = memsz;
- return map;
-}
-
-// add map info with given fd, offset, vaddr and memsz
-static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
- uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
- return NULL;
- }
-
- // add this to map list
- map->next = ph->core->maps;
- ph->core->maps = map;
- ph->core->num_maps++;
-
- return map;
-}
-
-// Part of the class sharing workaround
-static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
- uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ((map = allocate_init_map(ph->core->classes_jsa_fd,
- offset, vaddr, memsz)) == NULL) {
- return NULL;
- }
-
- map->next = ph->core->class_share_maps;
- ph->core->class_share_maps = map;
- return map;
-}
-
-// Return the map_info for the given virtual address. We keep a sorted
-// array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
- int mid, lo = 0, hi = ph->core->num_maps - 1;
- map_info *mp;
-
- while (hi - lo > 1) {
- mid = (lo + hi) / 2;
- if (addr >= ph->core->map_array[mid]->vaddr) {
- lo = mid;
- } else {
- hi = mid;
- }
- }
-
- if (addr < ph->core->map_array[hi]->vaddr) {
- mp = ph->core->map_array[lo];
- } else {
- mp = ph->core->map_array[hi];
- }
-
- if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
- return (mp);
- }
-
-
- // Part of the class sharing workaround
- // Unfortunately, we have no way of detecting -Xshare state.
- // Check out the share maps atlast, if we don't find anywhere.
- // This is done this way so to avoid reading share pages
- // ahead of other normal maps. For eg. with -Xshare:off we don't
- // want to prefer class sharing data to data from core.
- mp = ph->core->class_share_maps;
- if (mp) {
- print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
- }
- while (mp) {
- if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
- print_debug("located map_info at 0x%lx from class share maps\n", addr);
- return (mp);
- }
- mp = mp->next;
- }
-
- print_debug("can't locate map_info at 0x%lx\n", addr);
- return (NULL);
-}
-
-//---------------------------------------------------------------
-// Part of the class sharing workaround:
-//
-// With class sharing, pages are mapped from classes.jsa file.
-// The read-only class sharing pages are mapped as MAP_SHARED,
-// PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes.jsa.
-
-static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
- jboolean i;
- if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
- *pvalue = i;
- return true;
- } else {
- return false;
- }
-}
-
-static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
- uintptr_t uip;
- if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
- *pvalue = uip;
- return true;
- } else {
- return false;
- }
-}
-
-// used to read strings from debuggee
-static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
- size_t i = 0;
- char c = ' ';
-
- while (c != '\0') {
- if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
- return false;
- }
- if (i < size - 1) {
- buf[i] = c;
- } else {
- // smaller buffer
- return false;
- }
- i++; addr++;
- }
-
- buf[i] = '\0';
- return true;
-}
-
-#define USE_SHARED_SPACES_SYM "UseSharedSpaces"
-// mangled name of Arguments::SharedArchivePath
-#define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
-#define LIBJVM_NAME "/libjvm.so"
-
-static bool init_classsharing_workaround(struct ps_prochandle* ph) {
- lib_info* lib = ph->libs;
- while (lib != NULL) {
- // we are iterating over shared objects from the core dump. look for
- // libjvm.so.
- const char *jvm_name = 0;
- if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
- char classes_jsa[PATH_MAX];
- CDSFileMapHeaderBase header;
- int fd = -1;
- int m = 0;
- size_t n = 0;
- uintptr_t base = 0, useSharedSpacesAddr = 0;
- uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
- jboolean useSharedSpaces = 0;
- map_info* mi = 0;
-
- memset(classes_jsa, 0, sizeof(classes_jsa));
- jvm_name = lib->name;
- useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
- if (useSharedSpacesAddr == 0) {
- print_debug("can't lookup 'UseSharedSpaces' flag\n");
- return false;
- }
-
- // Hotspot vm types are not exported to build this library. So
- // using equivalent type jboolean to read the value of
- // UseSharedSpaces which is same as hotspot type "bool".
- if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
- print_debug("can't read the value of 'UseSharedSpaces' flag\n");
- return false;
- }
-
- if ((int)useSharedSpaces == 0) {
- print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
- return true;
- }
-
- sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
- if (sharedArchivePathAddrAddr == 0) {
- print_debug("can't lookup shared archive path symbol\n");
- return false;
- }
-
- if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
- print_debug("can't read shared archive path pointer\n");
- return false;
- }
-
- if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
- print_debug("can't read shared archive path value\n");
- return false;
- }
-
- print_debug("looking for %s\n", classes_jsa);
- // open the class sharing archive file
- fd = pathmap_open(classes_jsa);
- if (fd < 0) {
- print_debug("can't open %s!\n", classes_jsa);
- ph->core->classes_jsa_fd = -1;
- return false;
- } else {
- print_debug("opened %s\n", classes_jsa);
- }
-
- // read CDSFileMapHeaderBase from the file
- memset(&header, 0, sizeof(CDSFileMapHeaderBase));
- if ((n = read(fd, &header, sizeof(CDSFileMapHeaderBase)))
- != sizeof(CDSFileMapHeaderBase)) {
- print_debug("can't read shared archive file map header from %s\n", classes_jsa);
- close(fd);
- return false;
- }
-
- // check file magic
- if (header._magic != CDS_ARCHIVE_MAGIC) {
- print_debug("%s has bad shared archive file magic number 0x%x, expecting 0x%x\n",
- classes_jsa, header._magic, CDS_ARCHIVE_MAGIC);
- close(fd);
- return false;
- }
-
- // check version
- if (header._version != CURRENT_CDS_ARCHIVE_VERSION) {
- print_debug("%s has wrong shared archive file version %d, expecting %d\n",
- classes_jsa, header._version, CURRENT_CDS_ARCHIVE_VERSION);
- close(fd);
- return false;
- }
-
- ph->core->classes_jsa_fd = fd;
- // add read-only maps from classes.jsa to the list of maps
- for (m = 0; m < NUM_CDS_REGIONS; m++) {
- if (header._space[m]._read_only) {
- base = (uintptr_t) header._space[m]._addr._base;
- // no need to worry about the fractional pages at-the-end.
- // possible fractional pages are handled by core_read_data.
- add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
- base, (size_t) header._space[m]._used);
- print_debug("added a share archive map at 0x%lx\n", base);
- }
- }
- return true;
- }
- lib = lib->next;
- }
- return true;
-}
-
//---------------------------------------------------------------------------
// functions to handle map_info
--- a/src/jdk.hotspot.agent/macosx/native/libsaproc/ps_core.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/macosx/native/libsaproc/ps_core.c Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include <stdlib.h>
#include <stddef.h>
#include "libproc_impl.h"
-#include "cds.h"
+#include "ps_core_common.h"
#ifdef __APPLE__
#include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h"
@@ -39,332 +39,6 @@
// For live processes, refer to ps_proc.c. Portions of this is adapted
// /modelled after Solaris libproc.so (in particular Pcore.c)
-//----------------------------------------------------------------------
-// ps_prochandle cleanup helper functions
-
-// close all file descriptors
-static void close_files(struct ps_prochandle* ph) {
- lib_info* lib = NULL;
-
- // close core file descriptor
- if (ph->core->core_fd >= 0)
- close(ph->core->core_fd);
-
- // close exec file descriptor
- if (ph->core->exec_fd >= 0)
- close(ph->core->exec_fd);
-
- // close interp file descriptor
- if (ph->core->interp_fd >= 0)
- close(ph->core->interp_fd);
-
- // close class share archive file
- if (ph->core->classes_jsa_fd >= 0)
- close(ph->core->classes_jsa_fd);
-
- // close all library file descriptors
- lib = ph->libs;
- while (lib) {
- int fd = lib->fd;
- if (fd >= 0 && fd != ph->core->exec_fd) {
- close(fd);
- }
- lib = lib->next;
- }
-}
-
-// clean all map_info stuff
-static void destroy_map_info(struct ps_prochandle* ph) {
- map_info* map = ph->core->maps;
- while (map) {
- map_info* next = map->next;
- free(map);
- map = next;
- }
-
- if (ph->core->map_array) {
- free(ph->core->map_array);
- }
-
- // Part of the class sharing workaround
- map = ph->core->class_share_maps;
- while (map) {
- map_info* next = map->next;
- free(map);
- map = next;
- }
-}
-
-// ps_prochandle operations
-static void core_release(struct ps_prochandle* ph) {
- if (ph->core) {
- close_files(ph);
- destroy_map_info(ph);
- free(ph->core);
- }
-}
-
-static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
- print_debug("can't allocate memory for map_info\n");
- return NULL;
- }
-
- // initialize map
- map->fd = fd;
- map->offset = offset;
- map->vaddr = vaddr;
- map->memsz = memsz;
- return map;
-}
-
-// add map info with given fd, offset, vaddr and memsz
-static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
- uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
- return NULL;
- }
-
- // add this to map list
- map->next = ph->core->maps;
- ph->core->maps = map;
- ph->core->num_maps++;
-
- return map;
-}
-
-// Part of the class sharing workaround
-static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
- uintptr_t vaddr, size_t memsz) {
- map_info* map;
- if ((map = allocate_init_map(ph->core->classes_jsa_fd,
- offset, vaddr, memsz)) == NULL) {
- return NULL;
- }
-
- map->next = ph->core->class_share_maps;
- ph->core->class_share_maps = map;
- return map;
-}
-
-// Return the map_info for the given virtual address. We keep a sorted
-// array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
- int mid, lo = 0, hi = ph->core->num_maps - 1;
- map_info *mp;
-
- while (hi - lo > 1) {
- mid = (lo + hi) / 2;
- if (addr >= ph->core->map_array[mid]->vaddr) {
- lo = mid;
- } else {
- hi = mid;
- }
- }
-
- if (addr < ph->core->map_array[hi]->vaddr) {
- mp = ph->core->map_array[lo];
- } else {
- mp = ph->core->map_array[hi];
- }
-
- if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
- return (mp);
- }
-
-
- // Part of the class sharing workaround
- // Unfortunately, we have no way of detecting -Xshare state.
- // Check out the share maps atlast, if we don't find anywhere.
- // This is done this way so to avoid reading share pages
- // ahead of other normal maps. For eg. with -Xshare:off we don't
- // want to prefer class sharing data to data from core.
- mp = ph->core->class_share_maps;
- if (mp) {
- print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
- }
- while (mp) {
- if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
- print_debug("located map_info at 0x%lx from class share maps\n", addr);
- return (mp);
- }
- mp = mp->next;
- }
-
- print_debug("can't locate map_info at 0x%lx\n", addr);
- return (NULL);
-}
-
-//---------------------------------------------------------------
-// Part of the class sharing workaround:
-//
-// With class sharing, pages are mapped from classes.jsa file.
-// The read-only class sharing pages are mapped as MAP_SHARED,
-// PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes.jsa.
-
-static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
- jboolean i;
- if (ps_pread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
- *pvalue = i;
- return true;
- } else {
- return false;
- }
-}
-
-static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
- uintptr_t uip;
- if (ps_pread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
- *pvalue = uip;
- return true;
- } else {
- return false;
- }
-}
-
-// used to read strings from debuggee
-static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
- size_t i = 0;
- char c = ' ';
-
- while (c != '\0') {
- if (ps_pread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
- return false;
- }
- if (i < size - 1) {
- buf[i] = c;
- } else {
- // smaller buffer
- return false;
- }
- i++; addr++;
- }
- buf[i] = '\0';
- return true;
-}
-
-// mangled name of Arguments::SharedArchivePath
-#define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
-
-#ifdef __APPLE__
-#define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
-#define LIBJVM_NAME "/libjvm.dylib"
-#else
-#define USE_SHARED_SPACES_SYM "UseSharedSpaces"
-#define LIBJVM_NAME "/libjvm.so"
-#endif // __APPLE_
-
-static bool init_classsharing_workaround(struct ps_prochandle* ph) {
- int m;
- size_t n;
- lib_info* lib = ph->libs;
- while (lib != NULL) {
- // we are iterating over shared objects from the core dump. look for
- // libjvm.so.
- const char *jvm_name = 0;
- if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
- char classes_jsa[PATH_MAX];
- CDSFileMapHeaderBase header;
- int fd = -1;
- uintptr_t base = 0, useSharedSpacesAddr = 0;
- uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
- jboolean useSharedSpaces = 0;
-
- memset(classes_jsa, 0, sizeof(classes_jsa));
- jvm_name = lib->name;
- useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
- if (useSharedSpacesAddr == 0) {
- print_debug("can't lookup 'UseSharedSpaces' flag\n");
- return false;
- }
-
- // Hotspot vm types are not exported to build this library. So
- // using equivalent type jboolean to read the value of
- // UseSharedSpaces which is same as hotspot type "bool".
- if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
- print_debug("can't read the value of 'UseSharedSpaces' flag\n");
- return false;
- }
-
- if ((int)useSharedSpaces == 0) {
- print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
- return true;
- }
-
- sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
- if (sharedArchivePathAddrAddr == 0) {
- print_debug("can't lookup shared archive path symbol\n");
- return false;
- }
-
- if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
- print_debug("can't read shared archive path pointer\n");
- return false;
- }
-
- if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
- print_debug("can't read shared archive path value\n");
- return false;
- }
-
- print_debug("looking for %s\n", classes_jsa);
- // open the class sharing archive file
- fd = pathmap_open(classes_jsa);
- if (fd < 0) {
- print_debug("can't open %s!\n", classes_jsa);
- ph->core->classes_jsa_fd = -1;
- return false;
- } else {
- print_debug("opened %s\n", classes_jsa);
- }
-
- // read CDSFileMapHeaderBase from the file
- memset(&header, 0, sizeof(CDSFileMapHeaderBase));
- if ((n = read(fd, &header, sizeof(CDSFileMapHeaderBase)))
- != sizeof(CDSFileMapHeaderBase)) {
- print_debug("can't read shared archive file map header from %s\n", classes_jsa);
- close(fd);
- return false;
- }
-
- // check file magic
- if (header._magic != CDS_ARCHIVE_MAGIC) {
- print_debug("%s has bad shared archive file magic number 0x%x, expecting 0x%x\n",
- classes_jsa, header._magic, CDS_ARCHIVE_MAGIC);
- close(fd);
- return false;
- }
-
- // check version
- if (header._version != CURRENT_CDS_ARCHIVE_VERSION) {
- print_debug("%s has wrong shared archive file version %d, expecting %d\n",
- classes_jsa, header._version, CURRENT_CDS_ARCHIVE_VERSION);
- close(fd);
- return false;
- }
-
- ph->core->classes_jsa_fd = fd;
- // add read-only maps from classes.jsa to the list of maps
- for (m = 0; m < NUM_CDS_REGIONS; m++) {
- if (header._space[m]._read_only) {
- base = (uintptr_t) header._space[m]._addr._base;
- // no need to worry about the fractional pages at-the-end.
- // possible fractional pages are handled by core_read_data.
- add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
- base, (size_t) header._space[m]._used);
- print_debug("added a share archive map at 0x%lx\n", base);
- }
- }
- return true;
- }
- lib = lib->next;
- }
- return true;
-}
-
//---------------------------------------------------------------------------
// functions to handle map_info
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java Fri Oct 11 12:08:01 2019 +0530
@@ -47,7 +47,6 @@
static int REGISTER_MASK_IN_PLACE;
// Types of OopValues
- static int UNUSED_VALUE;
static int OOP_VALUE;
static int NARROWOOP_VALUE;
static int CALLEE_SAVED_VALUE;
@@ -70,7 +69,6 @@
TYPE_MASK_IN_PLACE = db.lookupIntConstant("OopMapValue::type_mask_in_place").intValue();
REGISTER_MASK = db.lookupIntConstant("OopMapValue::register_mask").intValue();
REGISTER_MASK_IN_PLACE = db.lookupIntConstant("OopMapValue::register_mask_in_place").intValue();
- UNUSED_VALUE = db.lookupIntConstant("OopMapValue::unused_value").intValue();
OOP_VALUE = db.lookupIntConstant("OopMapValue::oop_value").intValue();
NARROWOOP_VALUE = db.lookupIntConstant("OopMapValue::narrowoop_value").intValue();
CALLEE_SAVED_VALUE = db.lookupIntConstant("OopMapValue::callee_saved_value").intValue();
@@ -78,7 +76,6 @@
}
public static abstract class OopTypes {
- public static final OopTypes UNUSED_VALUE = new OopTypes() { int getValue() { return OopMapValue.UNUSED_VALUE; }};
public static final OopTypes OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.OOP_VALUE; }};
public static final OopTypes NARROWOOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.NARROWOOP_VALUE; }};
public static final OopTypes CALLEE_SAVED_VALUE = new OopTypes() { int getValue() { return OopMapValue.CALLEE_SAVED_VALUE; }};
@@ -111,8 +108,7 @@
public OopTypes getType() {
int which = (getValue() & TYPE_MASK_IN_PLACE);
- if (which == UNUSED_VALUE) return OopTypes.UNUSED_VALUE;
- else if (which == OOP_VALUE) return OopTypes.OOP_VALUE;
+ if (which == OOP_VALUE) return OopTypes.OOP_VALUE;
else if (which == NARROWOOP_VALUE) return OopTypes.NARROWOOP_VALUE;
else if (which == CALLEE_SAVED_VALUE) return OopTypes.CALLEE_SAVED_VALUE;
else if (which == DERIVED_OOP_VALUE) return OopTypes.DERIVED_OOP_VALUE;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/NotificationThread.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+
+import sun.jvm.hotspot.debugger.Address;
+
+public class NotificationThread extends JavaThread {
+ public NotificationThread(Address addr) {
+ super(addr);
+ }
+
+ public boolean isJavaThread() { return false; }
+
+}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java Fri Oct 11 12:08:01 2019 +0530
@@ -158,6 +158,7 @@
}
virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
+ virtualConstructor.addMapping("NotificationThread", NotificationThread.class);
}
public Threads() {
@@ -165,14 +166,14 @@
}
/** NOTE: this returns objects of type JavaThread, CompilerThread,
- JvmtiAgentThread, and ServiceThread.
+ JvmtiAgentThread, NotificationThread, and ServiceThread.
The latter four are subclasses of the former. Most operations
(fetching the top frame, etc.) are only allowed to be performed on
a "pure" JavaThread. For this reason, {@link
sun.jvm.hotspot.runtime.JavaThread#isJavaThread} has been
changed from the definition in the VM (which returns true for
all of these thread types) to return true for JavaThreads and
- false for the three subclasses. FIXME: should reconsider the
+ false for the four subclasses. FIXME: should reconsider the
inheritance hierarchy; see {@link
sun.jvm.hotspot.runtime.JavaThread#isJavaThread}. */
public JavaThread getJavaThreadAt(int i) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/JMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/JMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -182,9 +182,8 @@
hgw.write(fileName);
System.out.println("heap written to " + fileName);
return true;
- } catch (IOException | RuntimeException exp) {
- System.err.println(exp.getMessage());
- return false;
+ } catch (IOException exp) {
+ throw new RuntimeException(exp);
}
}
@@ -199,8 +198,7 @@
System.out.println("heap written to " + fileName);
return true;
} catch (IOException exp) {
- System.err.println(exp.getMessage());
- return false;
+ throw new RuntimeException(exp);
}
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/Tool.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/Tool.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,8 @@
try {
returnStatus = start(args);
+ } catch (Throwable t) {
+ t.printStackTrace(System.err);
} finally {
stop();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h> // just include something, or else solaris compiler will complain that this file is empty
+
+#if defined(LINUX) || defined(__APPLE__)
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stddef.h>
+#ifdef LINUX
+#include <elf.h>
+#include <link.h>
+#include "proc_service.h"
+#include "salibelf.h"
+#endif
+#include "libproc_impl.h"
+#include "cds.h"
+
+#ifdef __APPLE__
+#include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h"
+#endif
+
+#ifdef LINUX
+// I have no idea why this function is called ps_pread() on macos but ps_pdread on linux.
+#define ps_pread ps_pdread
+#endif
+
+// Common code shared between linux/native/libsaproc/ps_core.c and macosx/native/libsaproc/ps_core.c
+
+//----------------------------------------------------------------------
+// ps_prochandle cleanup helper functions
+
+// close all file descriptors
+static void close_files(struct ps_prochandle* ph) {
+ lib_info* lib = NULL;
+
+ // close core file descriptor
+ if (ph->core->core_fd >= 0)
+ close(ph->core->core_fd);
+
+ // close exec file descriptor
+ if (ph->core->exec_fd >= 0)
+ close(ph->core->exec_fd);
+
+ // close interp file descriptor
+ if (ph->core->interp_fd >= 0)
+ close(ph->core->interp_fd);
+
+ // close class share archive file
+ if (ph->core->classes_jsa_fd >= 0)
+ close(ph->core->classes_jsa_fd);
+
+ // close all library file descriptors
+ lib = ph->libs;
+ while (lib) {
+ int fd = lib->fd;
+ if (fd >= 0 && fd != ph->core->exec_fd) {
+ close(fd);
+ }
+ lib = lib->next;
+ }
+}
+
+// clean all map_info stuff
+static void destroy_map_info(struct ps_prochandle* ph) {
+ map_info* map = ph->core->maps;
+ while (map) {
+ map_info* next = map->next;
+ free(map);
+ map = next;
+ }
+
+ if (ph->core->map_array) {
+ free(ph->core->map_array);
+ }
+
+ // Part of the class sharing workaround
+ map = ph->core->class_share_maps;
+ while (map) {
+ map_info* next = map->next;
+ free(map);
+ map = next;
+ }
+}
+
+// ps_prochandle operations
+void core_release(struct ps_prochandle* ph) {
+ if (ph->core) {
+ close_files(ph);
+ destroy_map_info(ph);
+ free(ph->core);
+ }
+}
+
+static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
+ map_info* map;
+ if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
+ print_debug("can't allocate memory for map_info\n");
+ return NULL;
+ }
+
+ // initialize map
+ map->fd = fd;
+ map->offset = offset;
+ map->vaddr = vaddr;
+ map->memsz = memsz;
+ return map;
+}
+
+// add map info with given fd, offset, vaddr and memsz
+map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
+ uintptr_t vaddr, size_t memsz) {
+ map_info* map;
+ if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
+ return NULL;
+ }
+
+ // add this to map list
+ map->next = ph->core->maps;
+ ph->core->maps = map;
+ ph->core->num_maps++;
+
+ return map;
+}
+
+// Part of the class sharing workaround
+static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
+ uintptr_t vaddr, size_t memsz) {
+ map_info* map;
+ if ((map = allocate_init_map(ph->core->classes_jsa_fd,
+ offset, vaddr, memsz)) == NULL) {
+ return NULL;
+ }
+
+ map->next = ph->core->class_share_maps;
+ ph->core->class_share_maps = map;
+ return map;
+}
+
+// Return the map_info for the given virtual address. We keep a sorted
+// array of pointers in ph->map_array, so we can binary search.
+map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
+ int mid, lo = 0, hi = ph->core->num_maps - 1;
+ map_info *mp;
+
+ while (hi - lo > 1) {
+ mid = (lo + hi) / 2;
+ if (addr >= ph->core->map_array[mid]->vaddr) {
+ lo = mid;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (addr < ph->core->map_array[hi]->vaddr) {
+ mp = ph->core->map_array[lo];
+ } else {
+ mp = ph->core->map_array[hi];
+ }
+
+ if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+ return (mp);
+ }
+
+
+ // Part of the class sharing workaround
+ // Unfortunately, we have no way of detecting -Xshare state.
+  // Check the class share maps last, if the address is not found in the
+  // normal maps. This is done to avoid reading shared pages ahead of other
+  // normal maps. For example, with -Xshare:off we don't want to prefer
+  // class sharing data to data from the core.
+ mp = ph->core->class_share_maps;
+ if (mp) {
+ print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
+ }
+ while (mp) {
+ if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+ print_debug("located map_info at 0x%lx from class share maps\n", addr);
+ return (mp);
+ }
+ mp = mp->next;
+ }
+
+ print_debug("can't locate map_info at 0x%lx\n", addr);
+ return (NULL);
+}
+
+//---------------------------------------------------------------
+// Part of the class sharing workaround:
+//
+// With class sharing, pages are mapped from classes.jsa file.
+// The read-only class sharing pages are mapped as MAP_SHARED,
+// PROT_READ pages. These pages are not dumped into core dump.
+// With this workaround, these pages are read from classes.jsa.
+
+static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
+ jboolean i;
+ if (ps_pread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
+ *pvalue = i;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
+ uintptr_t uip;
+ if (ps_pread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
+ *pvalue = uip;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// used to read strings from debuggee
+bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
+ size_t i = 0;
+ char c = ' ';
+
+ while (c != '\0') {
+ if (ps_pread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
+ return false;
+ }
+ if (i < size - 1) {
+ buf[i] = c;
+ } else {
+ // smaller buffer
+ return false;
+ }
+ i++; addr++;
+ }
+ buf[i] = '\0';
+ return true;
+}
+
+#ifdef LINUX
+// mangled name of Arguments::SharedArchivePath
+#define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define USE_SHARED_SPACES_SYM "UseSharedSpaces"
+#define LIBJVM_NAME "/libjvm.so"
+#endif
+
+#ifdef __APPLE__
+// mangled name of Arguments::SharedArchivePath
+#define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
+#define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
+#define LIBJVM_NAME "/libjvm.dylib"
+#endif
+
+bool init_classsharing_workaround(struct ps_prochandle* ph) {
+ lib_info* lib = ph->libs;
+ while (lib != NULL) {
+ // we are iterating over shared objects from the core dump. look for
+ // libjvm.so.
+ const char *jvm_name = 0;
+ if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
+ char classes_jsa[PATH_MAX];
+ CDSFileMapHeaderBase header;
+ int fd = -1;
+ uintptr_t base = 0, useSharedSpacesAddr = 0;
+ uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
+ jboolean useSharedSpaces = 0;
+ int m;
+ size_t n;
+
+ memset(classes_jsa, 0, sizeof(classes_jsa));
+ jvm_name = lib->name;
+ useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
+ if (useSharedSpacesAddr == 0) {
+ print_debug("can't lookup 'UseSharedSpaces' flag\n");
+ return false;
+ }
+
+ // Hotspot vm types are not exported to build this library. So
+ // using equivalent type jboolean to read the value of
+ // UseSharedSpaces which is same as hotspot type "bool".
+ if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
+ print_debug("can't read the value of 'UseSharedSpaces' flag\n");
+ return false;
+ }
+
+ if ((int)useSharedSpaces == 0) {
+ print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
+ return true;
+ }
+
+ sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
+ if (sharedArchivePathAddrAddr == 0) {
+ print_debug("can't lookup shared archive path symbol\n");
+ return false;
+ }
+
+ if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
+ print_debug("can't read shared archive path pointer\n");
+ return false;
+ }
+
+ if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
+ print_debug("can't read shared archive path value\n");
+ return false;
+ }
+
+ print_debug("looking for %s\n", classes_jsa);
+ // open the class sharing archive file
+ fd = pathmap_open(classes_jsa);
+ if (fd < 0) {
+ print_debug("can't open %s!\n", classes_jsa);
+ ph->core->classes_jsa_fd = -1;
+ return false;
+ } else {
+ print_debug("opened %s\n", classes_jsa);
+ }
+
+ // read CDSFileMapHeaderBase from the file
+ memset(&header, 0, sizeof(CDSFileMapHeaderBase));
+ if ((n = read(fd, &header, sizeof(CDSFileMapHeaderBase)))
+ != sizeof(CDSFileMapHeaderBase)) {
+ print_debug("can't read shared archive file map header from %s\n", classes_jsa);
+ close(fd);
+ return false;
+ }
+
+ // check file magic
+ if (header._magic != CDS_ARCHIVE_MAGIC) {
+ print_debug("%s has bad shared archive file magic number 0x%x, expecting 0x%x\n",
+ classes_jsa, header._magic, CDS_ARCHIVE_MAGIC);
+ close(fd);
+ return false;
+ }
+
+ // check version
+ if (header._version != CURRENT_CDS_ARCHIVE_VERSION) {
+ print_debug("%s has wrong shared archive file version %d, expecting %d\n",
+ classes_jsa, header._version, CURRENT_CDS_ARCHIVE_VERSION);
+ close(fd);
+ return false;
+ }
+
+ ph->core->classes_jsa_fd = fd;
+ // add read-only maps from classes.jsa to the list of maps
+ for (m = 0; m < NUM_CDS_REGIONS; m++) {
+ if (header._space[m]._read_only) {
+        // With *some* Linux versions, the core file doesn't include read-only mmap'ed
+        // file regions, so let's add them here. This is harmless if the core file also
+        // includes these regions.
+ base = (uintptr_t) header._space[m]._addr._base;
+ // no need to worry about the fractional pages at-the-end.
+ // possible fractional pages are handled by core_read_data.
+ add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
+ base, (size_t) header._space[m]._used);
+ print_debug("added a share archive map at 0x%lx\n", base);
+ }
+ }
+ return true;
+ }
+ lib = lib->next;
+ }
+ return true;
+}
+
+#endif // defined(LINUX) || defined(__APPLE__)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.h Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef _PS_CORE_COMMON_H_
+#define _PS_CORE_COMMON_H_
+
+map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr);
+map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
+ uintptr_t vaddr, size_t memsz);
+void core_release(struct ps_prochandle* ph);
+bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size);
+bool init_classsharing_workaround(struct ps_prochandle* ph);
+
+#endif // _PS_CORE_COMMON_H_
--- a/src/jdk.httpserver/share/classes/com/sun/net/httpserver/spi/HttpServerProvider.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.httpserver/share/classes/com/sun/net/httpserver/spi/HttpServerProvider.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -133,8 +133,9 @@
* <ol>
*
* <li><p> If the system property
- * {@code com.sun.net.httpserver.HttpServerProvider} is defined then it
- * is taken to be the fully-qualified name of a concrete provider class.
+ * {@systemProperty com.sun.net.httpserver.HttpServerProvider}
+ * is defined then it is taken to be the fully-qualified name
+ * of a concrete provider class.
* The class is loaded and instantiated; if this process fails then an
* unspecified unchecked error or exception is thrown. </p></li>
*
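
Editor's note: the retagged javadoc above describes the first step of the provider lookup: the system property names a concrete class that is loaded and instantiated. A standalone sketch of that step only (not the jdk.httpserver implementation; the return type and error handling are simplified):

    public final class ProviderFromPropertyDemo {
        // Loads whatever class the system property names; returns null if the property is unset.
        static Object loadProvider() throws ReflectiveOperationException {
            String cn = System.getProperty("com.sun.net.httpserver.HttpServerProvider");
            if (cn == null) {
                return null; // next lookup steps (service loader, default provider) would apply
            }
            Class<?> c = Class.forName(cn, true, ClassLoader.getSystemClassLoader());
            return c.getDeclaredConstructor().newInstance(); // failures propagate to the caller
        }

        public static void main(String[] args) throws Exception {
            System.out.println(loadProvider()); // prints null unless the property is set
        }
    }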
--- a/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/HotSpotGraalManagement.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/HotSpotGraalManagement.java Fri Oct 11 12:08:01 2019 +0530
@@ -36,6 +36,7 @@
import javax.management.ObjectName;
import org.graalvm.compiler.debug.TTY;
+import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig;
import org.graalvm.compiler.hotspot.HotSpotGraalManagementRegistration;
import org.graalvm.compiler.hotspot.HotSpotGraalRuntime;
import org.graalvm.compiler.serviceprovider.ServiceProvider;
@@ -56,7 +57,7 @@
HotSpotGraalManagement nextDeferred;
@Override
- public void initialize(HotSpotGraalRuntime runtime) {
+ public void initialize(HotSpotGraalRuntime runtime, GraalHotSpotVMConfig config) {
if (bean == null) {
if (runtime.getManagement() != this) {
throw new IllegalArgumentException("Cannot initialize a second management object for runtime " + runtime.getName());
--- a/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/HotSpotGraalRuntimeMBean.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/HotSpotGraalRuntimeMBean.java Fri Oct 11 12:08:01 2019 +0530
@@ -57,7 +57,7 @@
/**
* MBean used to access properties and operations of a {@link HotSpotGraalRuntime} instance.
*/
-final class HotSpotGraalRuntimeMBean implements DynamicMBean {
+public final class HotSpotGraalRuntimeMBean implements DynamicMBean {
/**
* The runtime instance to which this bean provides a management connection.
@@ -69,16 +69,16 @@
*/
private final ObjectName objectName;
- HotSpotGraalRuntimeMBean(ObjectName objectName, HotSpotGraalRuntime runtime) {
+ public HotSpotGraalRuntimeMBean(ObjectName objectName, HotSpotGraalRuntime runtime) {
this.objectName = objectName;
this.runtime = runtime;
}
- ObjectName getObjectName() {
+ public ObjectName getObjectName() {
return objectName;
}
- HotSpotGraalRuntime getRuntime() {
+ public HotSpotGraalRuntime getRuntime() {
return runtime;
}
--- a/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/JMXServiceProvider.java Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-
-package org.graalvm.compiler.hotspot.management;
-
-import static java.lang.Thread.currentThread;
-
-import java.lang.management.ManagementFactory;
-import java.util.List;
-
-import org.graalvm.compiler.serviceprovider.ServiceProvider;
-import org.graalvm.compiler.serviceprovider.JMXService;
-
-import com.sun.management.ThreadMXBean;
-
-/**
- * Implementation of {@link JMXService} for JDK 13 and later.
- */
-@ServiceProvider(JMXService.class)
-public class JMXServiceProvider extends JMXService {
- private final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
-
- @Override
- protected long getThreadAllocatedBytes(long id) {
- return threadMXBean.getThreadAllocatedBytes(id);
- }
-
- @Override
- protected long getCurrentThreadCpuTime() {
- long[] times = threadMXBean.getThreadCpuTime(new long[]{currentThread().getId()});
- return times[0];
- }
-
- @Override
- protected boolean isThreadAllocatedMemorySupported() {
- return threadMXBean.isThreadAllocatedMemorySupported();
- }
-
- @Override
- protected boolean isCurrentThreadCpuTimeSupported() {
- return threadMXBean.isThreadCpuTimeSupported();
- }
-
- @Override
- protected List<String> getInputArguments() {
- return ManagementFactory.getRuntimeMXBean().getInputArguments();
- }
-}
--- a/src/jdk.internal.vm.compiler.management/share/classes/org.graalvm.compiler.hotspot.management/src/org/graalvm/compiler/hotspot/management/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * JDK 11 and later versioned overlay for the {@code jdk.internal.vm.compiler.management} module.
- * This cannot be used in JDK 10 where {@code jdk.internal.vm.compiler.management} is a
- * non-upgradeable module.
- */
-
-
-package org.graalvm.compiler.hotspot.management;
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.libgraal/src/jdk/internal/vm/compiler/libgraal/OptionsEncoder.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.libgraal/src/jdk/internal/vm/compiler/libgraal/OptionsEncoder.java Fri Oct 11 12:08:01 2019 +0530
@@ -29,7 +29,7 @@
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
-import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.Map;
/**
@@ -122,7 +122,7 @@
* @throws IllegalArgumentException if {@code input} cannot be decoded
*/
public static Map<String, Object> decode(byte[] input) {
- Map<String, Object> res = new HashMap<>();
+ Map<String, Object> res = new LinkedHashMap<>();
try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(input))) {
final int size = in.readInt();
for (int i = 0; i < size; i++) {
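
Editor's note: switching decode's result from HashMap to LinkedHashMap makes the returned entries come back in the order they were written. A minimal, self-contained illustration of that difference (not the OptionsEncoder wire format):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class MapOrderDemo {
        public static void main(String[] args) {
            Map<String, Object> hashed = new HashMap<>();
            Map<String, Object> linked = new LinkedHashMap<>();
            for (String key : new String[]{"zeta", "alpha", "mid"}) {
                hashed.put(key, 1);
                linked.put(key, 1);
            }
            // LinkedHashMap iterates in insertion order; HashMap order is unspecified.
            System.out.println("HashMap:       " + hashed.keySet());
            System.out.println("LinkedHashMap: " + linked.keySet()); // [zeta, alpha, mid]
        }
    }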
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,11 @@
package org.graalvm.compiler.asm.aarch64;
+import static jdk.vm.ci.aarch64.AArch64.CPU;
+import static jdk.vm.ci.aarch64.AArch64.rscratch1;
+import static jdk.vm.ci.aarch64.AArch64.rscratch2;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.aarch64.AArch64.zr;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.BASE_REGISTER_ONLY;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.IMMEDIATE_SCALED;
@@ -35,13 +40,6 @@
import static org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.NO_WORK;
import org.graalvm.compiler.asm.BranchTargetOutOfBoundsException;
-
-import static jdk.vm.ci.aarch64.AArch64.CPU;
-import static jdk.vm.ci.aarch64.AArch64.r8;
-import static jdk.vm.ci.aarch64.AArch64.r9;
-import static jdk.vm.ci.aarch64.AArch64.sp;
-import static jdk.vm.ci.aarch64.AArch64.zr;
-
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.debug.GraalError;
@@ -52,7 +50,7 @@
public class AArch64MacroAssembler extends AArch64Assembler {
- private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(r8), new ScratchRegister(r9)};
+ private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(rscratch1), new ScratchRegister(rscratch2)};
// Points to the next free scratch register
private int nextFreeScratchRegister = 0;
@@ -1310,6 +1308,20 @@
super.fmsub(size, dst, dst, d, n);
}
+ /**
+ * dst = src1 * src2 + src3.
+ *
+ * @param size register size.
+ * @param dst floating point register. May not be null.
+ * @param src1 floating point register. May not be null.
+ * @param src2 floating point register. May not be null.
+ * @param src3 floating point register. May not be null.
+ */
+ @Override
+ public void fmadd(int size, Register dst, Register src1, Register src2, Register src3) {
+ super.fmadd(size, dst, src1, src2, src3);
+ }
+
/* Branches */
/**
@@ -1367,32 +1379,32 @@
case 64: {
// Be careful with registers: it's possible that x, y, and dst are the same
// register.
- Register rscratch1 = sc1.getRegister();
- Register rscratch2 = sc2.getRegister();
- mul(64, rscratch1, x, y); // Result bits 0..63
- smulh(64, rscratch2, x, y); // Result bits 64..127
+ Register temp1 = sc1.getRegister();
+ Register temp2 = sc2.getRegister();
+ mul(64, temp1, x, y); // Result bits 0..63
+ smulh(64, temp2, x, y); // Result bits 64..127
// Top is pure sign ext
- subs(64, zr, rscratch2, rscratch1, ShiftType.ASR, 63);
+ subs(64, zr, temp2, temp1, ShiftType.ASR, 63);
// Copy all 64 bits of the result into dst
- mov(64, dst, rscratch1);
- mov(rscratch1, 0x80000000);
+ mov(64, dst, temp1);
+ mov(temp1, 0x80000000);
// Develop 0 (EQ), or 0x80000000 (NE)
- cmov(32, rscratch1, rscratch1, zr, ConditionFlag.NE);
- cmp(32, rscratch1, 1);
+ cmov(32, temp1, temp1, zr, ConditionFlag.NE);
+ cmp(32, temp1, 1);
// 0x80000000 - 1 => VS
break;
}
case 32: {
- Register rscratch1 = sc1.getRegister();
- smaddl(rscratch1, x, y, zr);
+ Register temp1 = sc1.getRegister();
+ smaddl(temp1, x, y, zr);
// Copy the low 32 bits of the result into dst
- mov(32, dst, rscratch1);
- subs(64, zr, rscratch1, rscratch1, ExtendType.SXTW, 0);
+ mov(32, dst, temp1);
+ subs(64, zr, temp1, temp1, ExtendType.SXTW, 0);
// NE => overflow
- mov(rscratch1, 0x80000000);
+ mov(temp1, 0x80000000);
// Develop 0 (EQ), or 0x80000000 (NE)
- cmov(32, rscratch1, rscratch1, zr, ConditionFlag.NE);
- cmp(32, rscratch1, 1);
+ cmov(32, temp1, temp1, zr, ConditionFlag.NE);
+ cmp(32, temp1, 1);
// 0x80000000 - 1 => VS
break;
}
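
Editor's note: the new fmadd override documents dst = src1 * src2 + src3, i.e. a fused multiply-add with a single rounding of the exact product-plus-addend. A small standalone sketch of the numeric effect of that single rounding, using the standard Math.fma rather than Graal code:

    public final class FmaDemo {
        public static void main(String[] args) {
            double a = 0.1, b = 10.0, c = -1.0;
            // Unfused: a * b rounds to exactly 1.0 first, so the sum is 0.0.
            double unfused = a * b + c;
            // Fused: one rounding of the exact a * b + c keeps the residue 2^-54.
            double fused = Math.fma(a, b, c);
            System.out.println("unfused = " + unfused); // 0.0
            System.out.println("fused   = " + fused);   // 5.551115123125783E-17
        }
    }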
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64ArithmeticLIRGenerator.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64ArithmeticLIRGenerator.java Fri Oct 11 12:08:01 2019 +0530
@@ -229,9 +229,12 @@
}
private Value emitMultiplyAddSub(AArch64ArithmeticOp op, Value a, Value b, Value c) {
- assert isNumericInteger(a.getPlatformKind());
- assert isNumericInteger(b.getPlatformKind());
- assert isNumericInteger(c.getPlatformKind());
+ assert a.getPlatformKind() == b.getPlatformKind() && b.getPlatformKind() == c.getPlatformKind();
+ if (op == AArch64ArithmeticOp.ADD || op == AArch64ArithmeticOp.SUB) {
+ assert isNumericInteger(a.getPlatformKind());
+ } else if (op == AArch64ArithmeticOp.FADD) {
+ assert a.getPlatformKind() == AArch64Kind.SINGLE || a.getPlatformKind() == AArch64Kind.DOUBLE;
+ }
Variable result = getLIRGen().newVariable(LIRKind.combine(a, b, c));
AllocatableValue x = moveSp(asAllocatable(a));
@@ -447,6 +450,11 @@
}
@Override
+ public Value emitFusedMultiplyAdd(Value a, Value b, Value c) {
+ return emitMultiplyAddSub(AArch64ArithmeticOp.FADD, a, b, c);
+ }
+
+ @Override
public Value emitCountLeadingZeros(Value value) {
Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AArch64Kind.DWORD));
getLIRGen().append(new AArch64BitManipulationOp(getLIRGen(), CLZ, result, asAllocatable(value)));
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LIRGenerator.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LIRGenerator.java Fri Oct 11 12:08:01 2019 +0530
@@ -48,6 +48,10 @@
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
import org.graalvm.compiler.lir.aarch64.AArch64ArrayCompareToOp;
import org.graalvm.compiler.lir.aarch64.AArch64ArrayEqualsOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
import org.graalvm.compiler.lir.aarch64.AArch64ByteSwapOp;
import org.graalvm.compiler.lir.aarch64.AArch64Compare;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow;
@@ -59,10 +63,6 @@
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.aarch64.AArch64LIRFlagsVersioned;
import org.graalvm.compiler.lir.aarch64.AArch64Move;
-import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
-import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
-import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
-import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
import org.graalvm.compiler.lir.aarch64.AArch64SpeculativeBarrier;
@@ -586,8 +586,15 @@
}
@Override
- public void emitZeroMemory(Value address, Value length) {
- // Value address is 8-byte aligned; Value length is multiple of 8.
- append(new AArch64ZeroMemoryOp(asAllocatable(address), asAllocatable(length), false, -1));
+ public void emitZeroMemory(Value address, Value length, boolean isAligned) {
+ emitZeroMemory(address, length, isAligned, false, -1);
+ }
+
+ protected final void emitZeroMemory(Value address, Value length, boolean isAligned, boolean useDcZva, int zvaLength) {
+ RegisterValue regAddress = AArch64.r0.asValue(address.getValueKind());
+ RegisterValue regLength = AArch64.r1.asValue(length.getValueKind());
+ emitMove(regAddress, address);
+ emitMove(regLength, length);
+ append(new AArch64ZeroMemoryOp(regAddress, regLength, isAligned, useDcZva, zvaLength));
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LoweringProviderMixin.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LoweringProviderMixin.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,7 +35,7 @@
}
@Override
- default int bulkZeroingStride() {
- return 8;
+ default boolean supportsBulkZeroing() {
+ return true;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LIRGenerator.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LIRGenerator.java Fri Oct 11 12:08:01 2019 +0530
@@ -687,7 +687,7 @@
}
@Override
- public void emitZeroMemory(Value address, Value length) {
+ public void emitZeroMemory(Value address, Value length, boolean isAligned) {
RegisterValue lengthReg = AMD64.rcx.asValue(length.getValueKind());
emitMove(lengthReg, length);
append(new AMD64ZeroMemoryOp(asAddressValue(address), lengthReg));
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LoweringProviderMixin.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LoweringProviderMixin.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,7 +35,7 @@
}
@Override
- default int bulkZeroingStride() {
- return 1;
+ default boolean supportsBulkZeroing() {
+ return true;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.sparc/src/org/graalvm/compiler/core/sparc/SparcLoweringProviderMixin.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.sparc/src/org/graalvm/compiler/core/sparc/SparcLoweringProviderMixin.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,7 +35,7 @@
}
@Override
- default int bulkZeroingStride() {
- return 0;
+ default boolean supportsBulkZeroing() {
+ return false;
}
}
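
Editor's note: the three mixin changes above (AArch64, AMD64, SPARC) replace the integer bulkZeroingStride hint with a plain boolean capability. A hedged, standalone sketch of the resulting call pattern (hypothetical LoweringCapability interface standing in for the lowering provider; not Graal's actual API):

    public final class BulkZeroingFlagDemo {
        // Hypothetical stand-in for the capability being queried by the snippets.
        interface LoweringCapability {
            boolean supportsBulkZeroing();
        }

        static String chooseZeroingStrategy(LoweringCapability target, long bytes, long threshold) {
            // Mirrors the shape of the snippet logic: only use bulk zeroing when the
            // backend supports it and the region is large enough to pay off.
            if (target.supportsBulkZeroing() && bytes >= threshold) {
                return "bulk-zero " + bytes + " bytes";
            }
            return "zero " + bytes + " bytes with a store loop";
        }

        public static void main(String[] args) {
            System.out.println(chooseZeroingStrategy(() -> true, 4096, 2048));  // bulk-zero
            System.out.println(chooseZeroingStrategy(() -> false, 4096, 2048)); // store loop
        }
    }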
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CheckGraalInvariants.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CheckGraalInvariants.java Fri Oct 11 12:08:01 2019 +0530
@@ -432,6 +432,16 @@
try {
Class<?> c = Class.forName(className, true, CheckGraalInvariants.class.getClassLoader());
classes.add(c);
+ } catch (UnsupportedClassVersionError e) {
+ // graal-test.jar can contain classes compiled for different Java versions
+ } catch (NoClassDefFoundError e) {
+ if (!e.getMessage().contains("Could not initialize class")) {
+ throw e;
+ } else {
+ // A second or later attempt to initialize a class
+ // results in this confusing error where the
+ // original cause of initialization failure is lost
+ }
} catch (Throwable t) {
tool.handleClassLoadingException(t);
}
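
Editor's note: the new catch block relies on standard JVM behavior: when a class's static initializer fails, the first initialization attempt throws ExceptionInInitializerError, and every later attempt throws NoClassDefFoundError with a "Could not initialize class ..." message that no longer carries the original cause. A small standalone demonstration:

    public final class FailedInitDemo {
        static final class Broken {
            static final int VALUE = Integer.parseInt("not a number"); // static init fails
        }

        public static void main(String[] args) {
            for (int attempt = 1; attempt <= 2; attempt++) {
                try {
                    System.out.println(Broken.VALUE);
                } catch (Throwable t) {
                    // attempt 1: ExceptionInInitializerError (caused by NumberFormatException)
                    // attempt 2: NoClassDefFoundError: Could not initialize class ...$Broken
                    System.out.println("attempt " + attempt + ": " + t);
                }
            }
        }
    }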
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraphUtilOriginalValueTests.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+package org.graalvm.compiler.core.test;
+
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+
+import org.graalvm.compiler.nodes.util.GraphUtil;
+import org.junit.Assert;
+import org.junit.Test;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+
+import jdk.vm.ci.code.BailoutException;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+/**
+ * Unit tests derived from https://github.com/oracle/graal/pull/1690.
+ */
+public class GraphUtilOriginalValueTests extends CustomizedBytecodePatternTest {
+
+ static class LinkedNode {
+ LinkedNode next;
+ }
+
+ static class A extends LinkedNode {
+ }
+
+ static class B extends LinkedNode {
+ }
+
+ static class C extends LinkedNode {
+ }
+
+ public static Class<?> getLastClass(A a) {
+ LinkedNode current = a;
+ Class<?> currentKlass = null;
+ while (current != null) {
+ // This must not be folded to A.class
+ currentKlass = current.getClass();
+
+ current = current.next;
+ }
+ return currentKlass;
+ }
+
+ @Test
+ public void testGetClass() {
+ A a = new A();
+ a.next = new B();
+
+ test("getLastClass", a);
+ }
+
+ static final ConstantCallSite cs1 = init(A.class);
+ static final ConstantCallSite cs2 = init(B.class);
+ static final ConstantCallSite cs3 = init(C.class);
+
+ static ConstantCallSite init(Class<?> c) {
+ try {
+ return new ConstantCallSite(MethodHandles.lookup().unreflectConstructor(c.getDeclaredConstructor()));
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ }
+
+ public static boolean findTarget(MethodHandle key) {
+ ConstantCallSite cs = cs1;
+ while (cs != null) {
+ if (cs.getTarget() == key) {
+ return true;
+ }
+ if (cs == cs1) {
+ cs = cs2;
+ } else if (cs == cs2) {
+ cs = cs3;
+ } else {
+ cs = null;
+ }
+ }
+ return false;
+ }
+
+ @Test
+ public void testGetTarget() {
+ cs1.getTarget();
+ cs2.getTarget();
+ test("findTarget", cs3.getTarget());
+ }
+
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ String getDescriptor = "(Ljava/lang/Object;)V";
+ MethodVisitor m = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "unbalancedMonitors", getDescriptor, null, null);
+ Label loopHead = new Label();
+ Label end = new Label();
+ m.visitCode();
+
+ // @formatter:off
+ /*
+ * void unbalancedMonitors(Object o) {
+ * monitorenter(o);
+ * while (o.toString() != o) {
+ * monitorexit(o);
+ * o = o.toString();
+ * }
+ * }
+ */
+ // @formatter:on
+
+ m.visitVarInsn(ALOAD, 0);
+ m.visitInsn(MONITORENTER);
+ m.visitLabel(loopHead);
+ m.visitVarInsn(ALOAD, 0);
+ m.visitInsn(MONITOREXIT);
+ m.visitVarInsn(ALOAD, 0);
+ m.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Object", "toString", "()Ljava/lang/String;", false);
+ m.visitVarInsn(ALOAD, 0);
+ m.visitJumpInsn(IF_ACMPEQ, end);
+ m.visitVarInsn(ALOAD, 0);
+ m.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Object", "toString", "()Ljava/lang/String;", false);
+ m.visitVarInsn(ASTORE, 0);
+ m.visitJumpInsn(GOTO, loopHead);
+ m.visitLabel(end);
+ m.visitInsn(RETURN);
+ m.visitMaxs(2, 2);
+ m.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+
+ /**
+ * Tests that the use of {@link GraphUtil#originalValue} in parsing MONITOREXIT correctly
+ * detects unbalanced monitors.
+ */
+ @Test
+ public void testUnbalancedMonitors() throws ClassNotFoundException {
+ Class<?> testClass = getClass("UnbalancedMonitors");
+ ResolvedJavaMethod t1 = getResolvedJavaMethod(testClass, "unbalancedMonitors");
+ try {
+ parseForCompile(t1);
+ Assert.fail("expected a " + BailoutException.class.getName());
+ } catch (BailoutException e) {
+ String msg = e.getMessage();
+ Assert.assertTrue(msg, msg.contains("unbalanced monitors"));
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifySystemPropertyUsage.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/VerifySystemPropertyUsage.java Fri Oct 11 12:08:01 2019 +0530
@@ -87,8 +87,8 @@
} else if (holderQualified.equals("org.graalvm.compiler.hotspot.JVMCIVersionCheck") && caller.getName().equals("main")) {
// The main method in JVMCIVersionCheck is only called from the shell
return;
- } else if (packageName.startsWith("com.oracle.truffle") || packageName.startsWith("org.graalvm.polyglot")) {
- // Truffle and Polyglot do not depend on JVMCI so cannot use
+ } else if (packageName.startsWith("com.oracle.truffle") || packageName.startsWith("org.graalvm.polyglot") || packageName.startsWith("org.graalvm.home")) {
+ // Truffle and SDK do not depend on JVMCI so they cannot use
// Services.getSavedProperties()
return;
} else if (packageName.startsWith("com.oracle.svm")) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph.test/src/org/graalvm/compiler/graph/test/graphio/GraphOutputTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph.test/src/org/graalvm/compiler/graph/test/graphio/GraphOutputTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -40,7 +40,10 @@
import java.util.Objects;
import org.graalvm.graphio.GraphOutput;
import org.graalvm.graphio.GraphStructure;
+import org.graalvm.graphio.GraphTypes;
+import static org.junit.Assert.assertSame;
import org.junit.Test;
+import java.lang.reflect.Field;
public final class GraphOutputTest {
@@ -116,6 +119,18 @@
assertArrayEquals(expected.toByteArray(), embedded.toByteArray());
}
+ @Test
+ @SuppressWarnings({"static-method", "unchecked"})
+ public void testClassOfEnumValueWithImplementation() throws ClassNotFoundException, ReflectiveOperationException {
+ Class<? extends GraphTypes> defaultTypesClass = (Class<? extends GraphTypes>) Class.forName("org.graalvm.graphio.DefaultGraphTypes");
+ Field f = defaultTypesClass.getDeclaredField("DEFAULT");
+ f.setAccessible(true);
+ GraphTypes types = (GraphTypes) f.get(null);
+
+ Object clazz = types.enumClass(CustomEnum.ONE);
+ assertSame(CustomEnum.class, clazz);
+ }
+
private static ByteBuffer generateData(int size) {
ByteBuffer buffer = ByteBuffer.allocate(size);
for (int i = 0; i < size; i++) {
@@ -281,4 +296,20 @@
private static final class MockGraph {
}
+
+ private enum CustomEnum {
+ ONE() {
+ @Override
+ public String toString() {
+ return "one";
+ }
+ },
+
+ TWO() {
+ @Override
+ public String toString() {
+ return "two";
+ }
+ }
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeClass.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeClass.java Fri Oct 11 12:08:01 2019 +0530
@@ -741,6 +741,7 @@
} else {
Object objectA = data.getObject(a, i);
Object objectB = data.getObject(b, i);
+ assert !isLambda(objectA) || !isLambda(objectB) : "lambdas are not permitted in fields of " + this.toString();
if (objectA != objectB) {
if (objectA != null && objectB != null) {
if (!deepEquals0(objectA, objectB)) {
@@ -755,6 +756,11 @@
return true;
}
+ private static boolean isLambda(Object obj) {
+ // This needs to be consistent with InnerClassLambdaMetafactory constructor.
+ return obj != null && obj.getClass().getSimpleName().contains("$$Lambda$");
+ }
+
public boolean isValid(Position pos, NodeClass<?> from, Edges fromEdges) {
if (this == from) {
return true;
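
Editor's note: the new assertion depends on the class names that InnerClassLambdaMetafactory generates for lambdas on the HotSpot releases this patch targets, which contain the marker "$$Lambda$". A hedged standalone check of that naming convention (later JDKs that moved lambdas to hidden classes may name them differently):

    import java.util.function.Supplier;

    public final class LambdaClassNameDemo {
        public static void main(String[] args) {
            Supplier<String> lambda = () -> "hello";
            String name = lambda.getClass().getSimpleName();
            // On the JDKs this patch targets, lambda proxy classes get names such as
            // LambdaClassNameDemo$$Lambda$1/..., so this prints true.
            System.out.println(name + " -> " + name.contains("$$Lambda$"));
        }
    }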
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeList.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeList.java Fri Oct 11 12:08:01 2019 +0530
@@ -122,10 +122,6 @@
size = newSize;
}
- public boolean isList() {
- return true;
- }
-
protected abstract void update(T oldNode, T newNode);
public abstract Edges.Type getEdgesType();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -95,6 +95,9 @@
public void set(Node node, T value) {
assert check(node);
+ if (!node.isAlive()) {
+ throw new VerificationError("this node is not alive: " + node);
+ }
values[getNodeId(node)] = value;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotBackendFactory.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotBackendFactory.java Fri Oct 11 12:08:01 2019 +0530
@@ -192,7 +192,7 @@
replacements,
options);
AArch64GraphBuilderPlugins.register(plugins, replacements.getDefaultReplacementBytecodeProvider(), false,
- /* registerMathPlugins */true);
+ /* registerMathPlugins */true, /* emitJDK9StringSubstitutions */true, config.useFMAIntrinsics);
return plugins;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotLIRGenerator.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotLIRGenerator.java Fri Oct 11 12:08:01 2019 +0530
@@ -84,7 +84,6 @@
import org.graalvm.compiler.lir.aarch64.AArch64PrefetchOp;
import org.graalvm.compiler.lir.aarch64.AArch64RestoreRegistersOp;
import org.graalvm.compiler.lir.aarch64.AArch64SaveRegistersOp;
-import org.graalvm.compiler.lir.aarch64.AArch64ZeroMemoryOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.options.OptionValues;
@@ -544,7 +543,7 @@
}
@Override
- public void emitZeroMemory(Value address, Value length) {
+ public void emitZeroMemory(Value address, Value length, boolean isAligned) {
int dczidValue = config.psrInfoDczidValue;
EnumSet<AArch64.Flag> flags = ((AArch64) target().arch).getFlags();
@@ -563,7 +562,6 @@
useDcZva = false;
}
- // Value address is 8-byte aligned; Value length is multiple of 8.
- append(new AArch64ZeroMemoryOp(asAllocatable(address), asAllocatable(length), useDcZva, zvaLength));
+ emitZeroMemory(address, length, isAligned, useDcZva, zvaLength);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotRegisterAllocationConfig.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotRegisterAllocationConfig.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,10 @@
import static jdk.vm.ci.aarch64.AArch64.r25;
import static jdk.vm.ci.aarch64.AArch64.r26;
import static jdk.vm.ci.aarch64.AArch64.r28;
+import static jdk.vm.ci.aarch64.AArch64.r29;
import static jdk.vm.ci.aarch64.AArch64.r3;
+import static jdk.vm.ci.aarch64.AArch64.r30;
+import static jdk.vm.ci.aarch64.AArch64.r31;
import static jdk.vm.ci.aarch64.AArch64.r4;
import static jdk.vm.ci.aarch64.AArch64.r5;
import static jdk.vm.ci.aarch64.AArch64.r6;
@@ -96,12 +99,22 @@
public class AArch64HotSpotRegisterAllocationConfig extends RegisterAllocationConfig {
+ /**
+ * Excluding r27 is a temporary solution until we exclude r27 unconditionally at
+ * {@link jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig}.
+ *
+ * The underlying reason is that HotSpot does not intend to support r27 as an allocatable
+     * register. This register is excluded from the callee-saved registers in
+     * cpu/aarch64/sharedRuntime_aarch64.cpp:RegisterSaver::save_live_registers, and may lead to
+     * dereferencing an unknown value from the stack at
+ * share/runtime/stackValue.cpp:StackValue::create_stack_value during deoptimization.
+ */
// @formatter:off
static final Register[] registerAllocationOrder = {
r0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, r13, r14, r15,
r16, r17, r18, r19, r20, r21, r22, r23,
- r24, r25, r26, /* r27, */ r28, /* r29, r30, r31 */
+ r24, r25, r26, /* r27, */ r28, r29, r30, r31,
v0, v1, v2, v3, v4, v5, v6, v7,
v8, v9, v10, v11, v12, v13, v14, v15,
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.lir.test/src/org/graalvm/compiler/hotspot/lir/test/MitigateExceedingMaxOopMapStackOffsetTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.lir.test/src/org/graalvm/compiler/hotspot/lir/test/MitigateExceedingMaxOopMapStackOffsetTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -76,23 +76,22 @@
// Place reference slots at top and bottom of virtual frame
// with primitive slots in the middle. This tests that slot
// partitioning works.
+ AllocatableValue srcObject = gen.emitLoadConstant(objectLirKind, objectConstant);
for (int i = 0; i < numReferenceSlots / 2; i++) {
- AllocatableValue src = gen.emitLoadConstant(objectLirKind, objectConstant);
VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(objectLirKind);
slotList.add(slot);
- gen.emitMove(slot, src);
+ gen.emitMove(slot, srcObject);
}
+ AllocatableValue srcPrimitive = gen.emitLoadConstant(objectLirKind, primitiveConstant);
for (int i = 0; i < numPrimitiveSlots; i++) {
- AllocatableValue src = gen.emitLoadConstant(objectLirKind, primitiveConstant);
VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(primitiveLirKind);
slotList.add(slot);
- gen.emitMove(slot, src);
+ gen.emitMove(slot, srcPrimitive);
}
for (int i = numReferenceSlots / 2; i < numReferenceSlots; i++) {
- AllocatableValue src = gen.emitLoadConstant(objectLirKind, objectConstant);
VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(objectLirKind);
slotList.add(slot);
- gen.emitMove(slot, src);
+ gen.emitMove(slot, srcObject);
}
slots = slotList.toArray(new AllocatableValue[slotList.size()]);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Fri Oct 11 12:08:01 2019 +0530
@@ -64,6 +64,7 @@
import jdk.vm.ci.meta.MetaUtil;
import jdk.vm.ci.meta.MethodHandleAccessProvider.IntrinsicMethod;
import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.sparc.SPARC;
/**
* Checks the intrinsics implemented by Graal against the set of intrinsics declared by HotSpot. The
@@ -375,7 +376,7 @@
add(ignore,
"java/lang/Math.fma(DDD)D",
"java/lang/Math.fma(FFF)F");
- } else if (!(arch instanceof AMD64)) {
+ } else if (arch instanceof SPARC) {
add(toBeInvestigated,
"java/lang/Math.fma(DDD)D",
"java/lang/Math.fma(FFF)F");
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/ConstantPoolSubstitutionsTests.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/ConstantPoolSubstitutionsTests.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,8 +25,6 @@
package org.graalvm.compiler.hotspot.test;
-import static org.graalvm.compiler.serviceprovider.JavaVersionUtil.JAVA_SPEC;
-
import java.lang.reflect.Method;
import org.graalvm.compiler.core.test.GraalCompilerTest;
@@ -75,9 +73,27 @@
return graph;
}
+ private static String getMiscPackage() {
+ if (JavaVersionUtil.JAVA_SPEC <= 8) {
+ return "sun.misc";
+ }
+ try {
+ String miscPackage = "jdk.internal.access";
+ Class.forName(miscPackage + ".SharedSecrets");
+ return miscPackage;
+ } catch (ClassNotFoundException e) {
+ try {
+ String miscPackage = "jdk.internal.misc";
+ Class.forName(miscPackage + ".SharedSecrets");
+ return miscPackage;
+ } catch (ClassNotFoundException ex) {
+ }
+ throw new AssertionError(e);
+ }
+ }
+
private static Object getConstantPoolForObject() {
- String miscPackage = JavaVersionUtil.JAVA_SPEC <= 8 ? "sun.misc"
- : (JavaVersionUtil.JAVA_SPEC <= 11 ? "jdk.internal.misc" : "jdk.internal.access");
+ String miscPackage = getMiscPackage();
try {
Class<?> sharedSecretsClass = Class.forName(miscPackage + ".SharedSecrets");
Class<?> javaLangAccessClass = Class.forName(miscPackage + ".JavaLangAccess");
@@ -112,8 +128,7 @@
* This test uses some non-public API.
*/
private static void addExports(Class<?> c) {
- String packageName = JAVA_SPEC <= 11 ? "jdk.internal.misc" : "jdk.internal.access";
- ModuleSupport.exportPackageTo(String.class, packageName, c);
+ ModuleSupport.exportPackageTo(String.class, getMiscPackage(), c);
ModuleSupport.exportPackageTo(String.class, "jdk.internal.reflect", c);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Fri Oct 11 12:08:01 2019 +0530
@@ -321,6 +321,7 @@
public final int jvmciCountersThreadOffset = getFieldOffset("JavaThread::_jvmci_counters", Integer.class, "jlong*");
public final int doingUnsafeAccessOffset = getFieldOffset("JavaThread::_doing_unsafe_access", Integer.class, "bool", Integer.MAX_VALUE);
public final int javaThreadReservedStackActivationOffset = versioned.javaThreadReservedStackActivationOffset;
+ public final int jniEnvironmentOffset = getFieldOffset("JavaThread::_jni_environment", Integer.class, "JNIEnv", Integer.MIN_VALUE);
public boolean requiresReservedStackCheck(List<ResolvedJavaMethod> methods) {
if (enableStackReservedZoneAddress != 0 && methods != null) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalManagementRegistration.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalManagementRegistration.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
* Completes the initialization of this registration by recording the
* {@link HotSpotGraalRuntime} the MBean will provide an JMX interface to.
*/
- void initialize(HotSpotGraalRuntime runtime);
+ void initialize(HotSpotGraalRuntime runtime, GraalHotSpotVMConfig config);
/**
* Polls this registration to see if the MBean is registered in a MBean server.
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,6 +35,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
import jdk.internal.vm.compiler.collections.EconomicMap;
import jdk.internal.vm.compiler.collections.EconomicSet;
@@ -97,6 +98,10 @@
public final class HotSpotGraalRuntime implements HotSpotGraalRuntimeProvider {
private static final boolean IS_AOT = Boolean.parseBoolean(Services.getSavedProperties().get("com.oracle.graalvm.isaot"));
+ /**
+ * A factory for {@link HotSpotGraalManagementRegistration} injected by {@code LibGraalFeature}.
+ */
+ private static final Supplier<HotSpotGraalManagementRegistration> AOT_INJECTED_MANAGEMENT = null;
private static boolean checkArrayIndexScaleInvariants(MetaAccessProvider metaAccess) {
assert metaAccess.getArrayIndexScale(JavaKind.Byte) == 1;
@@ -165,12 +170,12 @@
compilerConfigurationName = compilerConfigurationFactory.getName();
if (IS_AOT) {
- management = null;
+ management = AOT_INJECTED_MANAGEMENT == null ? null : AOT_INJECTED_MANAGEMENT.get();
} else {
management = GraalServices.loadSingle(HotSpotGraalManagementRegistration.class, false);
- if (management != null) {
- management.initialize(this);
- }
+ }
+ if (management != null) {
+ management.initialize(this, config);
}
BackendMap backendMap = compilerConfigurationFactory.createBackendMap();
@@ -292,13 +297,15 @@
HotSpotResolvedObjectType type = ((HotSpotResolvedJavaMethod) compilable).getDeclaringClass();
if (type instanceof HotSpotResolvedJavaType) {
Class<?> clazz = runtime().getMirror(type);
- try {
- ClassLoader cl = clazz.getClassLoader();
- if (cl != null) {
- loaders.add(cl);
+ if (clazz != null) {
+ try {
+ ClassLoader cl = clazz.getClassLoader();
+ if (cl != null) {
+ loaders.add(cl);
+ }
+ } catch (SecurityException e) {
+ // This loader can obviously not be used for resolving class names
}
- } catch (SecurityException e) {
- // This loader can obviously not be used for resolving class names
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/JVMCIVersionCheck.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/JVMCIVersionCheck.java Fri Oct 11 12:08:01 2019 +0530
@@ -43,7 +43,7 @@
*/
public final class JVMCIVersionCheck {
- private static final Version JVMCI8_MIN_VERSION = new Version3(19, 2, 1);
+ private static final Version JVMCI8_MIN_VERSION = new Version3(19, 3, 2);
public interface Version {
boolean isLessThan(Version other);
@@ -230,6 +230,17 @@
failVersionCheck(props, exitOnFailure, "Could not parse the JDK 11 early access build number from java.vm.version property: %s.%n", vmVersion);
return;
}
+ } else if (vmVersion.contains("-jvmci-")) {
+ // A "labsjdk"
+ Version v = Version.parse(vmVersion);
+ if (v != null) {
+ if (v.isLessThan(minVersion)) {
+ failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion);
+ }
+ return;
+ }
+ failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" +
+ "Cannot read JVMCI version from java.vm.version property: %s.%n", vmVersion);
} else {
// Graal is compatible with all JDK versions as of 11 GA.
}
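
Editor's note: both the bumped JVMCI8_MIN_VERSION and the new "-jvmci-" branch reduce to an ordered comparison of version components. A minimal standalone sketch of that comparison (a hypothetical Version3 stand-in, not the JVMCIVersionCheck classes):

    public final class VersionCompareDemo {
        // Hypothetical stand-in for a three-component JVMCI version.
        static final class Version3 {
            final int major, minor, build;

            Version3(int major, int minor, int build) {
                this.major = major;
                this.minor = minor;
                this.build = build;
            }

            boolean isLessThan(Version3 other) {
                if (major != other.major) {
                    return major < other.major;
                }
                if (minor != other.minor) {
                    return minor < other.minor;
                }
                return build < other.build;
            }
        }

        public static void main(String[] args) {
            Version3 min = new Version3(19, 3, 2);
            System.out.println(new Version3(19, 2, 1).isLessThan(min)); // true  -> version check fails
            System.out.println(new Version3(19, 3, 2).isLessThan(min)); // false -> version check passes
        }
    }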
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Fri Oct 11 12:08:01 2019 +0530
@@ -254,7 +254,7 @@
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
ValueNode callSite = receiver.get();
- ValueNode folded = CallSiteTargetNode.tryFold(GraphUtil.originalValue(callSite), b.getMetaAccess(), b.getAssumptions());
+ ValueNode folded = CallSiteTargetNode.tryFold(GraphUtil.originalValue(callSite, true), b.getMetaAccess(), b.getAssumptions());
if (folded != null) {
b.addPush(JavaKind.Object, folded);
} else {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/NewObjectSnippets.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/NewObjectSnippets.java Fri Oct 11 12:08:01 2019 +0530
@@ -359,12 +359,12 @@
@ConstantParameter Register threadRegister,
@ConstantParameter boolean maybeUnroll,
@ConstantParameter String typeContext,
- @ConstantParameter int bulkZeroingStride,
+ @ConstantParameter boolean supportsBulkZeroing,
@ConstantParameter Counters counters) {
// Primitive array types are eagerly pre-resolved. We can use a floating load.
KlassPointer picHub = LoadConstantIndirectlyNode.loadKlass(hub);
return allocateArrayImpl(picHub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents,
- emitMemoryBarrier, threadRegister, maybeUnroll, typeContext, bulkZeroingStride, counters);
+ emitMemoryBarrier, threadRegister, maybeUnroll, typeContext, supportsBulkZeroing, counters);
}
@Snippet
@@ -378,12 +378,12 @@
@ConstantParameter Register threadRegister,
@ConstantParameter boolean maybeUnroll,
@ConstantParameter String typeContext,
- @ConstantParameter int bulkZeroingStride,
+ @ConstantParameter boolean supportsBulkZeroing,
@ConstantParameter Counters counters) {
// Array type would be resolved by dominating resolution.
KlassPointer picHub = LoadConstantIndirectlyFixedNode.loadKlass(hub);
return allocateArrayImpl(picHub, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents,
- emitMemoryBarrier, threadRegister, maybeUnroll, typeContext, bulkZeroingStride, counters);
+ emitMemoryBarrier, threadRegister, maybeUnroll, typeContext, supportsBulkZeroing, counters);
}
@Snippet
@@ -397,7 +397,7 @@
@ConstantParameter Register threadRegister,
@ConstantParameter boolean maybeUnroll,
@ConstantParameter String typeContext,
- @ConstantParameter int bulkZeroingStride,
+ @ConstantParameter boolean supportsBulkZeroing,
@ConstantParameter Counters counters) {
Object result = allocateArrayImpl(hub,
length,
@@ -408,7 +408,7 @@
emitMemoryBarrier, threadRegister,
maybeUnroll,
typeContext,
- bulkZeroingStride,
+ supportsBulkZeroing,
counters);
return piArrayCastToSnippetReplaceeStamp(verifyOop(result), length);
}
@@ -432,7 +432,7 @@
Register threadRegister,
boolean maybeUnroll,
String typeContext,
- int bulkZeroingStride,
+ boolean supportsBulkZeroing,
Counters counters) {
Object result;
long allocationSize = arrayAllocationSize(length, headerSize, log2ElementSize);
@@ -448,7 +448,7 @@
if (theCounters != null && theCounters.arrayLoopInit != null) {
theCounters.arrayLoopInit.inc();
}
- result = formatArray(hub, allocationSize, length, headerSize, top, prototypeMarkWord, fillContents, emitMemoryBarrier, maybeUnroll, bulkZeroingStride, counters);
+ result = formatArray(hub, allocationSize, length, headerSize, top, prototypeMarkWord, fillContents, emitMemoryBarrier, maybeUnroll, supportsBulkZeroing, counters);
} else {
result = newArrayStub(hub, length);
}
@@ -513,11 +513,11 @@
@ConstantParameter Register threadRegister,
@ConstantParameter JavaKind knownElementKind,
@ConstantParameter int knownLayoutHelper,
- @ConstantParameter int bulkZeroingStride,
+ @ConstantParameter boolean supportsBulkZeroing,
Word prototypeMarkWord,
@ConstantParameter Counters counters) {
Object result = allocateArrayDynamicImpl(elementType, voidClass, length, fillContents, emitMemoryBarrier, threadRegister, knownElementKind,
- knownLayoutHelper, bulkZeroingStride, prototypeMarkWord, counters);
+ knownLayoutHelper, supportsBulkZeroing, prototypeMarkWord, counters);
return result;
}
@@ -529,7 +529,7 @@
Register threadRegister,
JavaKind knownElementKind,
int knownLayoutHelper,
- int bulkZeroingStride,
+ boolean supportsBulkZeroing,
Word prototypeMarkWord,
Counters counters) {
/*
@@ -574,7 +574,7 @@
int log2ElementSize = (layoutHelper >> layoutHelperLog2ElementSizeShift(INJECTED_VMCONFIG)) & layoutHelperLog2ElementSizeMask(INJECTED_VMCONFIG);
Object result = allocateArrayImpl(nonNullKlass, length, prototypeMarkWord, headerSize, log2ElementSize, fillContents,
- emitMemoryBarrier, threadRegister, false, "dynamic type", bulkZeroingStride, counters);
+ emitMemoryBarrier, threadRegister, false, "dynamic type", supportsBulkZeroing, counters);
return piArrayCastToSnippetReplaceeStamp(verifyOop(result), length);
}
@@ -627,16 +627,16 @@
* @param endOffset offset to stop zeroing (exclusive). May not be word aligned.
* @param isEndOffsetConstant is {@code endOffset} known to be constant in the snippet
* @param manualUnroll maximally unroll zeroing
- * @param bulkZeroingStride stride of bulk zeroing supported by the backend
+ * @param supportsBulkZeroing whether bulk zeroing is supported by the backend
*/
private static void zeroMemory(Word memory, int startOffset, long endOffset, boolean isEndOffsetConstant, boolean manualUnroll,
- int bulkZeroingStride, Counters counters) {
- fillMemory(0, memory, startOffset, endOffset, isEndOffsetConstant, manualUnroll, bulkZeroingStride, counters);
+ boolean supportsBulkZeroing, Counters counters) {
+ fillMemory(0, memory, startOffset, endOffset, isEndOffsetConstant, manualUnroll, supportsBulkZeroing, counters);
}
- private static void fillMemory(long value, Word memory, int startOffset, long offsetLimit, boolean constantOffsetLimit, boolean manualUnroll,
- int bulkZeroingStride, Counters counters) {
- ReplacementsUtil.runtimeAssert((offsetLimit & 0x7) == 0, "unaligned object size");
+ private static void fillMemory(long value, Word memory, int startOffset, long endOffset, boolean constantOffsetLimit, boolean manualUnroll,
+ boolean supportsBulkZeroing, Counters counters) {
+ ReplacementsUtil.runtimeAssert((endOffset & 0x7) == 0, "unaligned object size");
int offset = startOffset;
if ((offset & 0x7) != 0) {
memory.writeInt(offset, (int) value, LocationIdentity.init());
@@ -644,7 +644,7 @@
}
ReplacementsUtil.runtimeAssert((offset & 0x7) == 0, "unaligned offset");
Counters theCounters = counters;
- if (manualUnroll && ((offsetLimit - offset) / 8) <= MAX_UNROLLED_OBJECT_ZEROING_STORES) {
+ if (manualUnroll && ((endOffset - offset) / 8) <= MAX_UNROLLED_OBJECT_ZEROING_STORES) {
ReplacementsUtil.staticAssert(!constantOffsetLimit, "size shouldn't be constant at instantiation time");
// This case handles arrays of constant length. Instead of having a snippet variant for
// each length, generate a chain of stores of maximum length. Once it's inlined the
@@ -655,7 +655,7 @@
explodeLoop();
for (int i = 0; i < MAX_UNROLLED_OBJECT_ZEROING_STORES; i++, offset += 8) {
- if (offset == offsetLimit) {
+ if (offset == endOffset) {
break;
}
memory.initializeLong(offset, value, LocationIdentity.init());
@@ -663,13 +663,13 @@
} else {
// Use Word instead of int to avoid extension to long in generated code
Word off = WordFactory.signed(offset);
- if (bulkZeroingStride > 0 && value == 0 && probability(SLOW_PATH_PROBABILITY, (offsetLimit - offset) >= getMinimalBulkZeroingSize(INJECTED_OPTIONVALUES))) {
+ if (supportsBulkZeroing && value == 0 && probability(SLOW_PATH_PROBABILITY, (endOffset - offset) >= getMinimalBulkZeroingSize(INJECTED_OPTIONVALUES))) {
if (theCounters != null && theCounters.instanceBulkInit != null) {
theCounters.instanceBulkInit.inc();
}
- ZeroMemoryNode.zero(memory.add(off), offsetLimit - offset, LocationIdentity.init());
+ ZeroMemoryNode.zero(memory.add(off), endOffset - offset, true, LocationIdentity.init());
} else {
- if (constantOffsetLimit && ((offsetLimit - offset) / 8) <= MAX_UNROLLED_OBJECT_ZEROING_STORES) {
+ if (constantOffsetLimit && ((endOffset - offset) / 8) <= MAX_UNROLLED_OBJECT_ZEROING_STORES) {
if (theCounters != null && theCounters.instanceSeqInit != null) {
theCounters.instanceSeqInit.inc();
}
@@ -679,7 +679,7 @@
theCounters.instanceLoopInit.inc();
}
}
- for (; off.rawValue() < offsetLimit; off = off.add(8)) {
+ for (; off.rawValue() < endOffset; off = off.add(8)) {
memory.initializeLong(off, value, LocationIdentity.init());
}
}
@@ -703,7 +703,7 @@
* @param manualUnroll maximally unroll zeroing
*/
private static void fillWithGarbage(Word memory, int startOffset, long endOffset, boolean isEndOffsetConstant, boolean manualUnroll, Counters counters) {
- fillMemory(0xfefefefefefefefeL, memory, startOffset, endOffset, isEndOffsetConstant, manualUnroll, 0, counters);
+ fillMemory(0xfefefefefefefefeL, memory, startOffset, endOffset, isEndOffsetConstant, manualUnroll, false, counters);
}
/**
@@ -720,7 +720,7 @@
Word prototypeMarkWord = useBiasedLocking(INJECTED_VMCONFIG) ? hub.readWord(prototypeMarkWordOffset(INJECTED_VMCONFIG), PROTOTYPE_MARK_WORD_LOCATION) : compileTimePrototypeMarkWord;
initializeObjectHeader(memory, prototypeMarkWord, hub);
if (fillContents) {
- zeroMemory(memory, instanceHeaderSize(INJECTED_VMCONFIG), size, constantSize, false, 0, counters);
+ zeroMemory(memory, instanceHeaderSize(INJECTED_VMCONFIG), size, constantSize, false, false, counters);
} else if (REPLACEMENTS_ASSERTIONS_ENABLED) {
fillWithGarbage(memory, instanceHeaderSize(INJECTED_VMCONFIG), size, constantSize, false, counters);
}
@@ -767,7 +767,7 @@
boolean fillContents,
boolean emitMemoryBarrier,
boolean maybeUnroll,
- int bulkZeroingStride,
+ boolean supportsBulkZeroing,
Counters counters) {
memory.writeInt(arrayLengthOffset(INJECTED_VMCONFIG), length, LocationIdentity.init());
/*
@@ -776,7 +776,7 @@
*/
initializeObjectHeader(memory, prototypeMarkWord, hub);
if (fillContents) {
- zeroMemory(memory, headerSize, allocationSize, false, maybeUnroll, bulkZeroingStride, counters);
+ zeroMemory(memory, headerSize, allocationSize, false, maybeUnroll, supportsBulkZeroing, counters);
} else if (REPLACEMENTS_ASSERTIONS_ENABLED) {
fillWithGarbage(memory, headerSize, allocationSize, false, maybeUnroll, counters);
}
@@ -897,7 +897,7 @@
args.addConst("threadRegister", registers.getThreadRegister());
args.addConst("maybeUnroll", length.isConstant());
args.addConst("typeContext", ProfileAllocations.getValue(localOptions) ? arrayType.toJavaName(false) : "");
- args.addConst("bulkZeroingStride", tool.getLowerer().bulkZeroingStride());
+ args.addConst("supportsBulkZeroing", tool.getLowerer().supportsBulkZeroing());
args.addConst("counters", counters);
SnippetTemplate template = template(newArrayNode, args);
graph.getDebug().log("Lowering allocateArray in %s: node=%s, template=%s, arguments=%s", graph, newArrayNode, template, args);
@@ -941,7 +941,7 @@
} else {
args.addConst("knownLayoutHelper", 0);
}
- args.addConst("bulkZeroingStride", tool.getLowerer().bulkZeroingStride());
+ args.addConst("supportsBulkZeroing", tool.getLowerer().supportsBulkZeroing());
args.add("prototypeMarkWord", lookupArrayClass(tool, JavaKind.Object).prototypeMarkWord());
args.addConst("counters", counters);
SnippetTemplate template = template(newArrayNode, args);
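
Editor's note: the renamed parameters (endOffset, supportsBulkZeroing) drive the zeroing strategy in fillMemory above. The following standalone sketch mimics the shape of that logic with a plain byte[] standing in for raw memory (the snippet's Word API, counters, and manual unrolling are omitted): align to 8 bytes with one 4-byte store, then either hand the run to a bulk primitive or fall back to 8-byte stores.

    import java.util.Arrays;

    public final class FillMemorySketch {
        // Simplified stand-in: zero memory[startOffset, endOffset). As in the snippet,
        // endOffset is assumed 8-byte aligned and any misalignment at startOffset is 4 bytes.
        static void zeroMemory(byte[] memory, int startOffset, int endOffset,
                        boolean supportsBulkZeroing, int bulkThreshold) {
            int offset = startOffset;
            if ((offset & 0x7) != 0) {
                for (int i = 0; i < 4; i++) {     // stands in for the snippet's writeInt
                    memory[offset + i] = 0;
                }
                offset += 4;
            }
            if (supportsBulkZeroing && endOffset - offset >= bulkThreshold) {
                Arrays.fill(memory, offset, endOffset, (byte) 0); // stands in for ZeroMemoryNode.zero
            } else {
                for (; offset < endOffset; offset += 8) {         // stands in for the 8-byte store loop
                    for (int i = 0; i < 8; i++) {
                        memory[offset + i] = 0;
                    }
                }
            }
        }

        public static void main(String[] args) {
            byte[] mem = new byte[64];
            Arrays.fill(mem, (byte) 0x5a);
            zeroMemory(mem, 12, 64, true, 32);
            System.out.println(mem[11] + " " + mem[12] + " " + mem[63]); // 90 0 0
        }
    }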
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BciBlockMapping.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BciBlockMapping.java Fri Oct 11 12:08:01 2019 +0530
@@ -26,27 +26,108 @@
import static org.graalvm.compiler.bytecode.Bytecodes.AALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.AASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.ACONST_NULL;
+import static org.graalvm.compiler.bytecode.Bytecodes.ALOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.ALOAD_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.ALOAD_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.ALOAD_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.ALOAD_3;
import static org.graalvm.compiler.bytecode.Bytecodes.ANEWARRAY;
import static org.graalvm.compiler.bytecode.Bytecodes.ARETURN;
import static org.graalvm.compiler.bytecode.Bytecodes.ARRAYLENGTH;
+import static org.graalvm.compiler.bytecode.Bytecodes.ASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.ASTORE_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.ASTORE_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.ASTORE_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.ASTORE_3;
import static org.graalvm.compiler.bytecode.Bytecodes.ATHROW;
import static org.graalvm.compiler.bytecode.Bytecodes.BALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.BASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.BIPUSH;
+import static org.graalvm.compiler.bytecode.Bytecodes.BREAKPOINT;
import static org.graalvm.compiler.bytecode.Bytecodes.CALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.CASTORE;
import static org.graalvm.compiler.bytecode.Bytecodes.CHECKCAST;
+import static org.graalvm.compiler.bytecode.Bytecodes.D2F;
+import static org.graalvm.compiler.bytecode.Bytecodes.D2I;
+import static org.graalvm.compiler.bytecode.Bytecodes.D2L;
+import static org.graalvm.compiler.bytecode.Bytecodes.DADD;
import static org.graalvm.compiler.bytecode.Bytecodes.DALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.DASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.DCMPG;
+import static org.graalvm.compiler.bytecode.Bytecodes.DCMPL;
+import static org.graalvm.compiler.bytecode.Bytecodes.DCONST_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.DCONST_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.DDIV;
+import static org.graalvm.compiler.bytecode.Bytecodes.DLOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.DLOAD_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.DLOAD_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.DLOAD_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.DLOAD_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.DMUL;
+import static org.graalvm.compiler.bytecode.Bytecodes.DNEG;
+import static org.graalvm.compiler.bytecode.Bytecodes.DREM;
import static org.graalvm.compiler.bytecode.Bytecodes.DRETURN;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSTORE_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSTORE_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSTORE_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSTORE_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.DSUB;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP2;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP2_X1;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP2_X2;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP_X1;
+import static org.graalvm.compiler.bytecode.Bytecodes.DUP_X2;
+import static org.graalvm.compiler.bytecode.Bytecodes.F2D;
+import static org.graalvm.compiler.bytecode.Bytecodes.F2I;
+import static org.graalvm.compiler.bytecode.Bytecodes.F2L;
+import static org.graalvm.compiler.bytecode.Bytecodes.FADD;
import static org.graalvm.compiler.bytecode.Bytecodes.FALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.FASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.FCMPG;
+import static org.graalvm.compiler.bytecode.Bytecodes.FCMPL;
+import static org.graalvm.compiler.bytecode.Bytecodes.FCONST_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.FCONST_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.FCONST_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.FDIV;
+import static org.graalvm.compiler.bytecode.Bytecodes.FLOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.FLOAD_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.FLOAD_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.FLOAD_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.FLOAD_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.FMUL;
+import static org.graalvm.compiler.bytecode.Bytecodes.FNEG;
+import static org.graalvm.compiler.bytecode.Bytecodes.FREM;
import static org.graalvm.compiler.bytecode.Bytecodes.FRETURN;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSTORE_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSTORE_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSTORE_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSTORE_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.FSUB;
import static org.graalvm.compiler.bytecode.Bytecodes.GETFIELD;
import static org.graalvm.compiler.bytecode.Bytecodes.GETSTATIC;
import static org.graalvm.compiler.bytecode.Bytecodes.GOTO;
import static org.graalvm.compiler.bytecode.Bytecodes.GOTO_W;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2B;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2C;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2D;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2F;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2L;
+import static org.graalvm.compiler.bytecode.Bytecodes.I2S;
+import static org.graalvm.compiler.bytecode.Bytecodes.IADD;
import static org.graalvm.compiler.bytecode.Bytecodes.IALOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.IAND;
import static org.graalvm.compiler.bytecode.Bytecodes.IASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_4;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_5;
+import static org.graalvm.compiler.bytecode.Bytecodes.ICONST_M1;
import static org.graalvm.compiler.bytecode.Bytecodes.IDIV;
import static org.graalvm.compiler.bytecode.Bytecodes.IFEQ;
import static org.graalvm.compiler.bytecode.Bytecodes.IFGE;
@@ -64,33 +145,88 @@
import static org.graalvm.compiler.bytecode.Bytecodes.IF_ICMPLE;
import static org.graalvm.compiler.bytecode.Bytecodes.IF_ICMPLT;
import static org.graalvm.compiler.bytecode.Bytecodes.IF_ICMPNE;
+import static org.graalvm.compiler.bytecode.Bytecodes.IINC;
+import static org.graalvm.compiler.bytecode.Bytecodes.ILOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.ILOAD_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.ILOAD_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.ILOAD_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.ILOAD_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.IMUL;
+import static org.graalvm.compiler.bytecode.Bytecodes.INEG;
+import static org.graalvm.compiler.bytecode.Bytecodes.INSTANCEOF;
import static org.graalvm.compiler.bytecode.Bytecodes.INVOKEDYNAMIC;
import static org.graalvm.compiler.bytecode.Bytecodes.INVOKEINTERFACE;
import static org.graalvm.compiler.bytecode.Bytecodes.INVOKESPECIAL;
import static org.graalvm.compiler.bytecode.Bytecodes.INVOKESTATIC;
import static org.graalvm.compiler.bytecode.Bytecodes.INVOKEVIRTUAL;
+import static org.graalvm.compiler.bytecode.Bytecodes.IOR;
import static org.graalvm.compiler.bytecode.Bytecodes.IREM;
import static org.graalvm.compiler.bytecode.Bytecodes.IRETURN;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISHL;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISHR;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISTORE_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISTORE_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISTORE_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISTORE_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.ISUB;
+import static org.graalvm.compiler.bytecode.Bytecodes.IUSHR;
+import static org.graalvm.compiler.bytecode.Bytecodes.IXOR;
import static org.graalvm.compiler.bytecode.Bytecodes.JSR;
import static org.graalvm.compiler.bytecode.Bytecodes.JSR_W;
+import static org.graalvm.compiler.bytecode.Bytecodes.L2D;
+import static org.graalvm.compiler.bytecode.Bytecodes.L2F;
+import static org.graalvm.compiler.bytecode.Bytecodes.L2I;
+import static org.graalvm.compiler.bytecode.Bytecodes.LADD;
import static org.graalvm.compiler.bytecode.Bytecodes.LALOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.LAND;
import static org.graalvm.compiler.bytecode.Bytecodes.LASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.LCMP;
+import static org.graalvm.compiler.bytecode.Bytecodes.LCONST_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.LCONST_1;
import static org.graalvm.compiler.bytecode.Bytecodes.LDC;
import static org.graalvm.compiler.bytecode.Bytecodes.LDC2_W;
import static org.graalvm.compiler.bytecode.Bytecodes.LDC_W;
import static org.graalvm.compiler.bytecode.Bytecodes.LDIV;
+import static org.graalvm.compiler.bytecode.Bytecodes.LLOAD;
+import static org.graalvm.compiler.bytecode.Bytecodes.LLOAD_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.LLOAD_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.LLOAD_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.LLOAD_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.LMUL;
+import static org.graalvm.compiler.bytecode.Bytecodes.LNEG;
import static org.graalvm.compiler.bytecode.Bytecodes.LOOKUPSWITCH;
+import static org.graalvm.compiler.bytecode.Bytecodes.LOR;
import static org.graalvm.compiler.bytecode.Bytecodes.LREM;
import static org.graalvm.compiler.bytecode.Bytecodes.LRETURN;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSHL;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSHR;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSTORE_0;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSTORE_1;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSTORE_2;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSTORE_3;
+import static org.graalvm.compiler.bytecode.Bytecodes.LSUB;
+import static org.graalvm.compiler.bytecode.Bytecodes.LUSHR;
+import static org.graalvm.compiler.bytecode.Bytecodes.LXOR;
+import static org.graalvm.compiler.bytecode.Bytecodes.MONITORENTER;
+import static org.graalvm.compiler.bytecode.Bytecodes.MONITOREXIT;
import static org.graalvm.compiler.bytecode.Bytecodes.MULTIANEWARRAY;
import static org.graalvm.compiler.bytecode.Bytecodes.NEW;
+import static org.graalvm.compiler.bytecode.Bytecodes.NEWARRAY;
+import static org.graalvm.compiler.bytecode.Bytecodes.NOP;
+import static org.graalvm.compiler.bytecode.Bytecodes.POP;
+import static org.graalvm.compiler.bytecode.Bytecodes.POP2;
import static org.graalvm.compiler.bytecode.Bytecodes.PUTFIELD;
import static org.graalvm.compiler.bytecode.Bytecodes.PUTSTATIC;
import static org.graalvm.compiler.bytecode.Bytecodes.RET;
import static org.graalvm.compiler.bytecode.Bytecodes.RETURN;
import static org.graalvm.compiler.bytecode.Bytecodes.SALOAD;
import static org.graalvm.compiler.bytecode.Bytecodes.SASTORE;
+import static org.graalvm.compiler.bytecode.Bytecodes.SIPUSH;
+import static org.graalvm.compiler.bytecode.Bytecodes.SWAP;
import static org.graalvm.compiler.bytecode.Bytecodes.TABLESWITCH;
+import static org.graalvm.compiler.bytecode.Bytecodes.WIDE;
import static org.graalvm.compiler.core.common.GraalOptions.SupportJsrBytecodes;
import java.util.ArrayDeque;
@@ -111,6 +247,7 @@
import org.graalvm.compiler.bytecode.Bytecodes;
import org.graalvm.compiler.core.common.PermanentBailoutException;
import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.options.OptionValues;
import jdk.vm.ci.code.BytecodeFrame;
@@ -159,7 +296,7 @@
int id;
final int startBci;
- int endBci;
+ int endBci; // The bci of the last bytecode in the block
private boolean isExceptionEntry;
private boolean isLoopHeader;
int loopId;
@@ -698,7 +835,9 @@
case SALOAD:
case ARRAYLENGTH:
case CHECKCAST:
+ case INSTANCEOF:
case NEW:
+ case NEWARRAY:
case ANEWARRAY:
case MULTIANEWARRAY:
case PUTSTATIC:
@@ -707,7 +846,8 @@
case GETFIELD:
case LDC:
case LDC_W:
- case LDC2_W: {
+ case LDC2_W:
+ case MONITORENTER: {
/*
* All bytecodes that can trigger lazy class initialization via a
* ClassInitializationPlugin (allocations, static field access) must be listed
@@ -720,7 +860,150 @@
addSuccessor(blockMap, bci, makeBlock(blockMap, stream.nextBCI()));
addSuccessor(blockMap, bci, handler);
}
+ break;
}
+
+ case NOP:
+ case ACONST_NULL:
+ case ICONST_M1:
+ case ICONST_0:
+ case ICONST_1:
+ case ICONST_2:
+ case ICONST_3:
+ case ICONST_4:
+ case ICONST_5:
+ case LCONST_0:
+ case LCONST_1:
+ case FCONST_0:
+ case FCONST_1:
+ case FCONST_2:
+ case DCONST_0:
+ case DCONST_1:
+ case BIPUSH:
+ case SIPUSH:
+ case ILOAD:
+ case LLOAD:
+ case FLOAD:
+ case DLOAD:
+ case ALOAD:
+ case ILOAD_0:
+ case ILOAD_1:
+ case ILOAD_2:
+ case ILOAD_3:
+ case LLOAD_0:
+ case LLOAD_1:
+ case LLOAD_2:
+ case LLOAD_3:
+ case FLOAD_0:
+ case FLOAD_1:
+ case FLOAD_2:
+ case FLOAD_3:
+ case DLOAD_0:
+ case DLOAD_1:
+ case DLOAD_2:
+ case DLOAD_3:
+ case ALOAD_0:
+ case ALOAD_1:
+ case ALOAD_2:
+ case ALOAD_3:
+ case ISTORE:
+ case LSTORE:
+ case FSTORE:
+ case DSTORE:
+ case ASTORE:
+ case ISTORE_0:
+ case ISTORE_1:
+ case ISTORE_2:
+ case ISTORE_3:
+ case LSTORE_0:
+ case LSTORE_1:
+ case LSTORE_2:
+ case LSTORE_3:
+ case FSTORE_0:
+ case FSTORE_1:
+ case FSTORE_2:
+ case FSTORE_3:
+ case DSTORE_0:
+ case DSTORE_1:
+ case DSTORE_2:
+ case DSTORE_3:
+ case ASTORE_0:
+ case ASTORE_1:
+ case ASTORE_2:
+ case ASTORE_3:
+ case POP:
+ case POP2:
+ case DUP:
+ case DUP_X1:
+ case DUP_X2:
+ case DUP2:
+ case DUP2_X1:
+ case DUP2_X2:
+ case SWAP:
+ case IADD:
+ case LADD:
+ case FADD:
+ case DADD:
+ case ISUB:
+ case LSUB:
+ case FSUB:
+ case DSUB:
+ case IMUL:
+ case LMUL:
+ case FMUL:
+ case DMUL:
+ case FDIV:
+ case DDIV:
+ case FREM:
+ case DREM:
+ case INEG:
+ case LNEG:
+ case FNEG:
+ case DNEG:
+ case ISHL:
+ case LSHL:
+ case ISHR:
+ case LSHR:
+ case IUSHR:
+ case LUSHR:
+ case IAND:
+ case LAND:
+ case IOR:
+ case LOR:
+ case IXOR:
+ case LXOR:
+ case IINC:
+ case I2L:
+ case I2F:
+ case I2D:
+ case L2I:
+ case L2F:
+ case L2D:
+ case F2I:
+ case F2L:
+ case F2D:
+ case D2I:
+ case D2L:
+ case D2F:
+ case I2B:
+ case I2C:
+ case I2S:
+ case LCMP:
+ case FCMPL:
+ case FCMPG:
+ case DCMPL:
+ case DCMPG:
+ case MONITOREXIT:
+ // All stack manipulation, comparison, conversion and arithmetic operators
+                // except for idiv and irem can't throw exceptions, so they don't need to connect
+ // exception edges. MONITOREXIT can't throw exceptions in the context of
+ // compiled code because of the structured locking requirement in the parser.
+ break;
+
+ case WIDE:
+ case BREAKPOINT:
+ default:
+ throw new GraalError("Unhandled bytecode");
}
stream.next();
}
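BciBlockMapping now classifies every bytecode explicitly and turns anything unhandled (WIDE, BREAKPOINT) into a GraalError, and the comment above relies on structured locking to justify MONITOREXIT having no exception edge. For reference, a plain-Java illustration (not part of the change) of the structured shape javac emits, where MONITORENTER and MONITOREXIT are always paired on the same object:

    class StructuredLocking {
        // javac pairs MONITORENTER/MONITOREXIT on the same lock object, including on the
        // exceptional exit path, which is the shape the parser's structured-locking
        // requirement refers to.
        static int structured(Object lock, int x) {
            synchronized (lock) {   // MONITORENTER lock
                x++;
            }                       // MONITOREXIT lock (normal and exceptional exits)
            return x;
        }
    }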
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Fri Oct 11 12:08:01 2019 +0530
@@ -361,6 +361,7 @@
import org.graalvm.compiler.nodes.calc.ConditionalNode;
import org.graalvm.compiler.nodes.calc.FloatConvertNode;
import org.graalvm.compiler.nodes.calc.FloatDivNode;
+import org.graalvm.compiler.nodes.calc.FloatNormalizeCompareNode;
import org.graalvm.compiler.nodes.calc.IntegerBelowNode;
import org.graalvm.compiler.nodes.calc.IntegerEqualsNode;
import org.graalvm.compiler.nodes.calc.IntegerLessThanNode;
@@ -370,7 +371,6 @@
import org.graalvm.compiler.nodes.calc.MulNode;
import org.graalvm.compiler.nodes.calc.NarrowNode;
import org.graalvm.compiler.nodes.calc.NegateNode;
-import org.graalvm.compiler.nodes.calc.FloatNormalizeCompareNode;
import org.graalvm.compiler.nodes.calc.ObjectEqualsNode;
import org.graalvm.compiler.nodes.calc.OrNode;
import org.graalvm.compiler.nodes.calc.RemNode;
@@ -2706,8 +2706,10 @@
}
MonitorIdNode monitorId = frameState.peekMonitorId();
ValueNode lockedObject = frameState.popLock();
- if (GraphUtil.originalValue(lockedObject) != GraphUtil.originalValue(x)) {
- throw bailout(String.format("unbalanced monitors: mismatch at monitorexit, %s != %s", GraphUtil.originalValue(x), GraphUtil.originalValue(lockedObject)));
+ ValueNode originalLockedObject = GraphUtil.originalValue(lockedObject, false);
+ ValueNode originalX = GraphUtil.originalValue(x, false);
+ if (originalLockedObject != originalX) {
+ throw bailout(String.format("unbalanced monitors: mismatch at monitorexit, %s != %s", originalLockedObject, originalX));
}
MonitorExitNode monitorExit = append(new MonitorExitNode(lockedObject, monitorId, escapedValue));
monitorExit.setStateAfter(createFrameState(bci, monitorExit));
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,6 +35,7 @@
import static org.graalvm.compiler.bytecode.Bytecodes.SWAP;
import static org.graalvm.compiler.debug.GraalError.shouldNotReachHere;
import static org.graalvm.compiler.nodes.FrameState.TWO_SLOT_MARKER;
+import static org.graalvm.compiler.nodes.util.GraphUtil.originalValue;
import java.util.ArrayList;
import java.util.Arrays;
@@ -70,7 +71,6 @@
import org.graalvm.compiler.nodes.graphbuilderconf.IntrinsicContext.SideEffectsState;
import org.graalvm.compiler.nodes.graphbuilderconf.ParameterPlugin;
import org.graalvm.compiler.nodes.java.MonitorIdNode;
-import org.graalvm.compiler.nodes.util.GraphUtil;
import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.meta.Assumptions;
@@ -384,38 +384,54 @@
return new FrameStateBuilder(this);
}
- public boolean isCompatibleWith(FrameStateBuilder other) {
+ private String incompatibilityErrorMessage(String reason, FrameStateBuilder other) {
+ return String.format("Frame states being merged are incompatible: %s%n This frame state: %s%nOther frame state: %s%nParser context: %s", reason, this, other, parser);
+ }
+
+ /**
+ * Checks invariants that must hold when merging {@code other} into this frame state.
+ *
+     * @param other the frame state being merged into this one
+ * @throws PermanentBailoutException if the frame states are incompatible with respect to their
+ * locked objects. This indicates bytecode that has unstructured or unbalanced
+ * locks.
+ * @throws GraalError if the frame states are incompatible in terms of {@link #rethrowException}
+ * or stack slots
+ */
+ public void checkCompatibleWith(FrameStateBuilder other) {
assert code.equals(other.code) && graph == other.graph && localsSize() == other.localsSize() : "Can only compare frame states of the same method";
assert lockedObjects.length == monitorIds.length && other.lockedObjects.length == other.monitorIds.length : "mismatch between lockedObjects and monitorIds";
if (rethrowException != other.rethrowException) {
- return false;
+ throw new GraalError(incompatibilityErrorMessage("mismatch in rethrowException flag", other));
}
if (stackSize() != other.stackSize()) {
- return false;
+ throw new GraalError(incompatibilityErrorMessage("mismatch in stack sizes", other));
}
for (int i = 0; i < stackSize(); i++) {
ValueNode x = stack[i];
ValueNode y = other.stack[i];
assert x != null && y != null;
if (x != y && (x == TWO_SLOT_MARKER || x.isDeleted() || y == TWO_SLOT_MARKER || y.isDeleted() || x.getStackKind() != y.getStackKind())) {
- return false;
+ throw new GraalError(incompatibilityErrorMessage("mismatch in stack types", other));
}
}
if (lockedObjects.length != other.lockedObjects.length) {
- return false;
+ throw new PermanentBailoutException(incompatibilityErrorMessage("unbalanced monitors - locked objects do not match", other));
}
for (int i = 0; i < lockedObjects.length; i++) {
- if (GraphUtil.originalValue(lockedObjects[i]) != GraphUtil.originalValue(other.lockedObjects[i]) || monitorIds[i] != other.monitorIds[i]) {
- throw new PermanentBailoutException("unbalanced monitors");
+ if (originalValue(lockedObjects[i], false) != originalValue(other.lockedObjects[i], false)) {
+ throw new PermanentBailoutException(incompatibilityErrorMessage("unbalanced monitors - locked objects do not match", other));
+ }
+ if (monitorIds[i] != other.monitorIds[i]) {
+ throw new PermanentBailoutException(incompatibilityErrorMessage("unbalanced monitors - monitors do not match", other));
}
}
- return true;
}
public void merge(AbstractMergeNode block, FrameStateBuilder other) {
- GraalError.guarantee(isCompatibleWith(other), "stacks do not match on merge; bytecodes would not verify:%nexpect: %s%nactual: %s", block, other);
+ checkCompatibleWith(other);
for (int i = 0; i < localsSize(); i++) {
locals[i] = merge(locals[i], other.locals[i], block);
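checkCompatibleWith reports problems by exception instead of a boolean: stack-shape mismatches are treated as compiler bugs (GraalError), while mismatched locking is treated as malformed-but-possible bytecode (PermanentBailoutException), and both now carry a message embedding the two frame states. A distilled, standalone illustration of that reporting style (placeholder types, not the Graal classes):

    final class CompatibilityCheck {
        // Instead of returning false and leaving the caller to produce a generic message,
        // the check throws with a message that embeds both sides of the comparison.
        static void checkSameStackSize(int thisSize, int otherSize) {
            if (thisSize != otherSize) {
                throw new IllegalStateException(String.format(
                        "Frame states being merged are incompatible: mismatch in stack sizes%n"
                                + " This frame state: %d%nOther frame state: %d",
                        thisSize, otherSize));
            }
        }
    }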
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/reflect/Field_set02.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/reflect/Field_set02.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,11 @@
package org.graalvm.compiler.jtt.reflect;
+import org.graalvm.compiler.debug.DebugOptions;
+import org.graalvm.compiler.jtt.JTTTest;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
-
/*
*/
public class Field_set02 extends JTTTest {
@@ -76,7 +77,13 @@
@Test
public void run0() throws Throwable {
- runTest("test", 0);
+ try {
+ runTest("test", 0);
+ } catch (AssertionError e) {
+ System.err.println(e);
+ System.err.println("object.byteField == " + object.byteField);
+ runTest(new OptionValues(getInitialOptions(), DebugOptions.Dump, ":2"), "test", 0);
+ }
}
@Test
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64ArithmeticOp.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64ArithmeticOp.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -481,7 +481,7 @@
*/
public MultiplyAddSubOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AllocatableValue src3) {
super(TYPE);
- assert op == ADD || op == SUB;
+ assert op == ADD || op == SUB || op == FADD;
this.op = op;
this.result = result;
this.src1 = src1;
@@ -499,6 +499,9 @@
case SUB:
masm.msub(size, asRegister(result), asRegister(src1), asRegister(src2), asRegister(src3));
break;
+ case FADD:
+ masm.fmadd(size, asRegister(result), asRegister(src1), asRegister(src2), asRegister(src3));
+ break;
default:
throw GraalError.shouldNotReachHere();
}
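With FADD accepted above, MultiplyAddSubOp can emit fmadd, i.e. result = src3 + src1 * src2 for floating-point operands. How the op gets selected is outside this hunk; the Java-level operation whose semantics match a single fmadd is java.lang.Math.fma (plain a * b + c cannot generally be fused, since that would change rounding):

    class FusedMultiplyAddShape {
        // Math.fma(a, b, c) computes a * b + c with a single rounding, matching
        // fmadd d, a, b, c on AArch64.
        static double fma(double a, double b, double c) {
            return Math.fma(a, b, c);
        }
    }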
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64ZeroMemoryOp.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64ZeroMemoryOp.java Fri Oct 11 12:08:01 2019 +0530
@@ -32,6 +32,7 @@
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.aarch64.AArch64Address;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
+import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
@@ -39,7 +40,7 @@
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.code.Register;
-import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Value;
/**
* Zero a chunk of memory on AArch64.
@@ -48,136 +49,186 @@
public final class AArch64ZeroMemoryOp extends AArch64LIRInstruction {
public static final LIRInstructionClass<AArch64ZeroMemoryOp> TYPE = LIRInstructionClass.create(AArch64ZeroMemoryOp.class);
- @Use({REG}) protected AllocatableValue addressValue;
- @Use({REG}) protected AllocatableValue lengthValue;
+ @Use({REG}) protected Value addressValue;
+ @Use({REG}) protected Value lengthValue;
+ @Temp({REG}) protected Value addressValueTemp;
+ @Temp({REG}) protected Value lengthValueTemp;
+
+ private final boolean isAligned;
private final boolean useDcZva;
private final int zvaLength;
/**
* Constructor of AArch64ZeroMemoryOp.
*
- * @param address allocatable 8-byte aligned base address of the memory chunk.
- * @param length allocatable length of the memory chunk, the value must be multiple of 8.
+ * @param address starting address of the memory chunk to be zeroed.
+ * @param length size of the memory chunk to be zeroed, in bytes.
+ * @param isAligned whether both address and size are aligned to 8 bytes.
     * @param useDcZva whether the DC ZVA instruction can be used.
     * @param zvaLength the ZVA length of the current AArch64 CPU; a negative value indicates the length
     * is unknown at compile time.
*/
- public AArch64ZeroMemoryOp(AllocatableValue address, AllocatableValue length, boolean useDcZva, int zvaLength) {
+ public AArch64ZeroMemoryOp(Value address, Value length, boolean isAligned, boolean useDcZva, int zvaLength) {
super(TYPE);
this.addressValue = address;
this.lengthValue = length;
+ this.addressValueTemp = address;
+ this.lengthValueTemp = length;
this.useDcZva = useDcZva;
this.zvaLength = zvaLength;
+ this.isAligned = isAligned;
}
@Override
protected void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
Register base = asRegister(addressValue);
Register size = asRegister(lengthValue);
- if (useDcZva && zvaLength > 0) {
- // From ARMv8-A architecture reference manual D12.2.35 Data Cache Zero ID register:
- // A valid ZVA length should be a power-of-2 value in [4, 2048]
- assert (CodeUtil.isPowerOf2(zvaLength) && 4 <= zvaLength && zvaLength <= 2048);
- emitZeroMemoryWithDc(masm, base, size, zvaLength);
- } else {
- // Use store pair instructions (STP) to zero memory as a fallback.
- emitZeroMemoryWithStp(masm, base, size);
- }
- }
+
+ try (AArch64MacroAssembler.ScratchRegister scratchRegister = masm.getScratchRegister()) {
+ Register alignmentBits = scratchRegister.getRegister();
+
+ Label tail = new Label();
+ Label done = new Label();
+
+ // Jump to DONE if size is zero.
+ masm.cbz(64, size, done);
+
+ if (!isAligned) {
+ Label baseAlignedTo2Bytes = new Label();
+ Label baseAlignedTo4Bytes = new Label();
+ Label baseAlignedTo8Bytes = new Label();
+
+ // Jump to per-byte zeroing loop if the zeroing size is less than 8
+ masm.cmp(64, size, 8);
+ masm.branchConditionally(ConditionFlag.LT, tail);
+
+ // Make base 8-byte aligned
+ masm.neg(64, alignmentBits, base);
+ masm.and(64, alignmentBits, alignmentBits, 7);
+
+ masm.tbz(alignmentBits, 0, baseAlignedTo2Bytes);
+ masm.sub(64, size, size, 1);
+ masm.str(8, zr, AArch64Address.createPostIndexedImmediateAddress(base, 1));
+ masm.bind(baseAlignedTo2Bytes);
+
+ masm.tbz(alignmentBits, 1, baseAlignedTo4Bytes);
+ masm.sub(64, size, size, 2);
+ masm.str(16, zr, AArch64Address.createPostIndexedImmediateAddress(base, 2));
+ masm.bind(baseAlignedTo4Bytes);
- /**
- * Zero a chunk of memory with DC ZVA instructions.
- *
- * @param masm the AArch64 macro assembler.
- * @param base base an 8-byte aligned address of the memory chunk to be zeroed.
- * @param size size of the memory chunk to be zeroed, in bytes, must be multiple of 8.
- * @param zvaLength the ZVA length info of current AArch64 CPU.
- */
- private static void emitZeroMemoryWithDc(AArch64MacroAssembler masm, Register base, Register size, int zvaLength) {
- Label preLoop = new Label();
- Label zvaLoop = new Label();
- Label postLoop = new Label();
- Label tail = new Label();
- Label done = new Label();
+ masm.tbz(alignmentBits, 2, baseAlignedTo8Bytes);
+ masm.sub(64, size, size, 4);
+ masm.str(32, zr, AArch64Address.createPostIndexedImmediateAddress(base, 4));
+ masm.bind(baseAlignedTo8Bytes);
+ // At this point base is 8-byte aligned.
+ }
+
+ if (useDcZva && zvaLength > 0) {
+ // From ARMv8-A architecture reference manual D12.2.35 Data Cache Zero ID register:
+ // A valid ZVA length should be a power-of-2 value in [4, 2048]
+ assert (CodeUtil.isPowerOf2(zvaLength) && 4 <= zvaLength && zvaLength <= 2048);
- try (AArch64MacroAssembler.ScratchRegister sc1 = masm.getScratchRegister()) {
- Register rscratch1 = sc1.getRegister();
+ Label preCheck = new Label();
+ Label preLoop = new Label();
+ Label mainCheck = new Label();
+ Label mainLoop = new Label();
+ Label postCheck = new Label();
+ Label postLoop = new Label();
+
+ masm.neg(64, alignmentBits, base);
+ masm.and(64, alignmentBits, alignmentBits, zvaLength - 1);
- // Count number of bytes to be pre-zeroed to align base address with ZVA length.
- masm.neg(64, rscratch1, base);
- masm.and(64, rscratch1, rscratch1, zvaLength - 1);
+ // Is size less than number of bytes to be pre-zeroed? Jump to post check if so.
+ masm.cmp(64, size, alignmentBits);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.LE, postCheck);
+ masm.sub(64, size, size, alignmentBits);
+
+ // Pre loop: align base according to the supported bulk zeroing stride.
+ masm.jmp(preCheck);
+
+ masm.align(crb.target.wordSize * 2);
+ masm.bind(preLoop);
+ masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
+ masm.bind(preCheck);
+ masm.subs(64, alignmentBits, alignmentBits, 8);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.GE, preLoop);
- // Is size less than number of bytes to be pre-zeroed? Jump to POST_LOOP if so.
- masm.cmp(64, size, rscratch1);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.LE, postLoop);
- masm.sub(64, size, size, rscratch1);
+ // Main loop: bulk zeroing
+ masm.jmp(mainCheck);
+
+ masm.align(crb.target.wordSize * 2);
+ masm.bind(mainLoop);
+ masm.dc(AArch64Assembler.DataCacheOperationType.ZVA, base);
+ masm.add(64, base, base, zvaLength);
+ masm.bind(mainCheck);
+ masm.subs(64, size, size, zvaLength);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.GE, mainLoop);
+
+ masm.add(64, size, size, zvaLength);
+
+ // Post loop: handle bytes after the main loop
+ masm.jmp(postCheck);
- // Pre-ZVA loop.
- masm.bind(preLoop);
- masm.subs(64, rscratch1, rscratch1, 8);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.LT, zvaLoop);
- masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
- masm.jmp(preLoop);
+ masm.align(crb.target.wordSize * 2);
+ masm.bind(postLoop);
+ masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
+ masm.bind(postCheck);
+ masm.subs(64, size, size, 8);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.GE, postLoop);
+
+ if (!isAligned) {
+ // Restore size for tail zeroing
+ masm.add(64, size, size, 8);
+ }
+ } else {
+ Label mainCheck = new Label();
+ Label mainLoop = new Label();
+
+ if (!isAligned) {
+ // After aligning base, we may have size less than 8. Need to check again.
+ masm.cmp(64, size, 8);
+ masm.branchConditionally(ConditionFlag.LT, tail);
+ }
- // ZVA loop.
- masm.bind(zvaLoop);
- masm.subs(64, size, size, zvaLength);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.LT, tail);
- masm.dc(AArch64Assembler.DataCacheOperationType.ZVA, base);
- masm.add(64, base, base, zvaLength);
- masm.jmp(zvaLoop);
+ masm.tbz(base, 3, mainCheck);
+ masm.sub(64, size, size, 8);
+ masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
+ masm.jmp(mainCheck);
- // Handle bytes after ZVA loop.
+ // The STP loop that zeros 16 bytes in each iteration.
+ masm.align(crb.target.wordSize * 2);
+ masm.bind(mainLoop);
+ masm.stp(64, zr, zr, AArch64Address.createPostIndexedImmediateAddress(base, 2));
+ masm.bind(mainCheck);
+ masm.subs(64, size, size, 16);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.GE, mainLoop);
+
+ // We may need to zero the tail 8 bytes of the memory chunk.
+ masm.add(64, size, size, 16);
+ masm.tbz(size, 3, tail);
+ masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
+
+ if (!isAligned) {
+ // Adjust size for tail zeroing
+ masm.sub(64, size, size, 8);
+ }
+ }
+
masm.bind(tail);
- masm.add(64, size, size, zvaLength);
+ if (!isAligned) {
+ Label perByteZeroingLoop = new Label();
- // Post-ZVA loop.
- masm.bind(postLoop);
- masm.subs(64, size, size, 8);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.LT, done);
- masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
- masm.jmp(postLoop);
-
- // Done.
+ masm.cbz(64, size, done);
+ // We have to ensure size > 0 when entering the following loop
+ masm.align(crb.target.wordSize * 2);
+ masm.bind(perByteZeroingLoop);
+ masm.str(8, zr, AArch64Address.createPostIndexedImmediateAddress(base, 1));
+ masm.subs(64, size, size, 1);
+ masm.branchConditionally(AArch64Assembler.ConditionFlag.NE, perByteZeroingLoop);
+ }
masm.bind(done);
}
}
- /**
- * Zero a chunk of memory with STP instructions.
- *
- * @param masm the AArch64 macro assembler.
- * @param base base an 8-byte aligned address of the memory chunk to be zeroed.
- * @param size size of the memory chunk to be zeroed, in bytes, must be multiple of 8.
- */
- private static void emitZeroMemoryWithStp(AArch64MacroAssembler masm, Register base, Register size) {
- Label loop = new Label();
- Label tail = new Label();
- Label done = new Label();
-
- // Jump to DONE if size is zero.
- masm.cbz(64, size, done);
-
- // Is base address already 16-byte aligned? Jump to LDP loop if so.
- masm.tbz(base, 3, loop);
- masm.sub(64, size, size, 8);
- masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
-
- // The STP loop that zeros 16 bytes in each iteration.
- masm.bind(loop);
- masm.subs(64, size, size, 16);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.LT, tail);
- masm.stp(64, zr, zr, AArch64Address.createPostIndexedImmediateAddress(base, 2));
- masm.jmp(loop);
-
- // We may need to zero the tail 8 bytes of the memory chunk.
- masm.bind(tail);
- masm.adds(64, size, size, 16);
- masm.branchConditionally(AArch64Assembler.ConditionFlag.EQ, done);
- masm.str(64, zr, AArch64Address.createPostIndexedImmediateAddress(base, 8));
-
- // Done.
- masm.bind(done);
- }
}
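The rewritten AArch64ZeroMemoryOp handles arbitrary (possibly unaligned) start addresses and sizes: align the base to 8 bytes one small store at a time, bulk-zero ZVA-sized blocks when DC ZVA is usable, fall back to 8-byte (or paired 16-byte) stores, and finish with a per-byte tail when the caller could not guarantee alignment. A simplified, software-only model of that control flow (an illustration of the strategy on a byte array, not the LIR op itself; zvaLength is assumed to be a power of two as asserted above, and zvaLength <= 0 disables the DC ZVA path, matching the useDcZva && zvaLength > 0 guard):

    import java.util.Arrays;

    final class ZeroMemoryModel {
        static void zero(byte[] mem, int base, int size, boolean isAligned, int zvaLength) {
            int p = base, n = size;
            if (!isAligned) {
                while (n > 0 && (p & 7) != 0) {                  // align p to 8 bytes
                    mem[p++] = 0;
                    n--;
                }
            }
            if (zvaLength > 0) {
                while (n >= 8 && (p & (zvaLength - 1)) != 0) {   // pre-loop: align to the ZVA block
                    Arrays.fill(mem, p, p + 8, (byte) 0);
                    p += 8;
                    n -= 8;
                }
                while (n >= zvaLength) {                         // main loop: one "DC ZVA" per block
                    Arrays.fill(mem, p, p + zvaLength, (byte) 0);
                    p += zvaLength;
                    n -= zvaLength;
                }
            }
            while (n >= 8) {                                     // post-loop / STP fallback
                Arrays.fill(mem, p, p + 8, (byte) 0);
                p += 8;
                n -= 8;
            }
            while (n > 0) {                                      // per-byte tail (only reached when !isAligned)
                mem[p++] = 0;
                n--;
            }
        }
    }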
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.sparc/src/org/graalvm/compiler/lir/sparc/SPARCControlFlow.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.sparc/src/org/graalvm/compiler/lir/sparc/SPARCControlFlow.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@
public class SPARCControlFlow {
    // This describes the maximum offset between the first emitted instruction (a load of the
    // constant into a scratch register, if it does not fit into the simm5 field of cbcond) and
    // the final branch instruction.
- private static final int maximumSelfOffsetInstructions = 2;
+ private static final int maximumSelfOffsetInstructions = 10;
public static final class ReturnOp extends SPARCBlockEndOp {
public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/gen/LIRGeneratorTool.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/gen/LIRGeneratorTool.java Fri Oct 11 12:08:01 2019 +0530
@@ -355,7 +355,7 @@
}
@SuppressWarnings("unused")
- default void emitZeroMemory(Value address, Value length) {
+ default void emitZeroMemory(Value address, Value length, boolean isAligned) {
throw GraalError.unimplemented("Bulk zeroing is not implemented on this architecture");
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragmentInside.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragmentInside.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -245,60 +245,40 @@
        // Discard the segment entry and its control flow after merging the segment into the loop.
StructuredGraph graph = mainLoopBegin.graph();
IfNode loopTest = mainCounted.getLimitTest();
- IfNode newSegmentTest = getDuplicatedNode(loopTest);
- AbstractBeginNode trueSuccessor = loopTest.trueSuccessor();
- AbstractBeginNode falseSuccessor = loopTest.falseSuccessor();
- FixedNode firstNode;
- boolean codeInTrueSide = false;
- if (trueSuccessor == mainCounted.getBody()) {
- firstNode = trueSuccessor.next();
- codeInTrueSide = true;
- } else {
- assert (falseSuccessor == mainCounted.getBody());
- firstNode = falseSuccessor.next();
- }
- trueSuccessor = newSegmentTest.trueSuccessor();
- falseSuccessor = newSegmentTest.falseSuccessor();
+ IfNode newSegmentLoopTest = getDuplicatedNode(loopTest);
+
+ // Redirect anchors
+ AbstractBeginNode falseSuccessor = newSegmentLoopTest.falseSuccessor();
for (Node usage : falseSuccessor.anchored().snapshot()) {
usage.replaceFirstInput(falseSuccessor, loopTest.falseSuccessor());
}
+ AbstractBeginNode trueSuccessor = newSegmentLoopTest.trueSuccessor();
for (Node usage : trueSuccessor.anchored().snapshot()) {
usage.replaceFirstInput(trueSuccessor, loopTest.trueSuccessor());
}
- AbstractBeginNode startBlockNode;
- if (codeInTrueSide) {
- startBlockNode = trueSuccessor;
- } else {
- graph.getDebug().dump(DebugContext.VERBOSE_LEVEL, mainLoopBegin.graph(), "before");
- startBlockNode = falseSuccessor;
- }
- FixedNode lastNode = getBlockEnd(startBlockNode);
- LoopEndNode loopEndNode = mainLoopBegin.getSingleLoopEnd();
- FixedWithNextNode lastCodeNode = (FixedWithNextNode) loopEndNode.predecessor();
- FixedNode newSegmentFirstNode = getDuplicatedNode(firstNode);
- FixedWithNextNode newSegmentLastNode = getDuplicatedNode(lastCodeNode);
- graph.getDebug().dump(DebugContext.DETAILED_LEVEL, loopEndNode.graph(), "Before placing segment");
- if (firstNode instanceof LoopEndNode) {
+
+        // Remove the duplicated loop test, keeping only the segment body
+ graph.removeSplitPropagate(newSegmentLoopTest, loopTest.trueSuccessor() == mainCounted.getBody() ? trueSuccessor : falseSuccessor);
+
+ graph.getDebug().dump(DebugContext.DETAILED_LEVEL, graph, "Before placing segment");
+ if (mainCounted.getBody().next() instanceof LoopEndNode) {
GraphUtil.killCFG(getDuplicatedNode(mainLoopBegin));
} else {
- newSegmentLastNode.clearSuccessors();
- startBlockNode.setNext(lastNode);
+ AbstractBeginNode newSegmentBegin = getDuplicatedNode(mainLoopBegin);
+ FixedNode newSegmentFirstNode = newSegmentBegin.next();
+ EndNode newSegmentEnd = getBlockEnd(newSegmentBegin);
+ FixedWithNextNode newSegmentLastNode = (FixedWithNextNode) newSegmentEnd.predecessor();
+ LoopEndNode loopEndNode = mainLoopBegin.getSingleLoopEnd();
+ FixedWithNextNode lastCodeNode = (FixedWithNextNode) loopEndNode.predecessor();
+
+ newSegmentBegin.clearSuccessors();
lastCodeNode.replaceFirstSuccessor(loopEndNode, newSegmentFirstNode);
- newSegmentLastNode.replaceFirstSuccessor(lastNode, loopEndNode);
- lastCodeNode.setNext(newSegmentFirstNode);
- newSegmentLastNode.setNext(loopEndNode);
- startBlockNode.clearSuccessors();
- lastNode.safeDelete();
- Node newSegmentTestStart = newSegmentTest.predecessor();
- LogicNode newSegmentIfTest = newSegmentTest.condition();
- newSegmentTestStart.clearSuccessors();
- newSegmentTest.safeDelete();
- newSegmentIfTest.safeDelete();
- trueSuccessor.safeDelete();
- falseSuccessor.safeDelete();
- newSegmentTestStart.safeDelete();
+ newSegmentLastNode.replaceFirstSuccessor(newSegmentEnd, loopEndNode);
+
+ newSegmentBegin.safeDelete();
+ newSegmentEnd.safeDelete();
}
- graph.getDebug().dump(DebugContext.DETAILED_LEVEL, loopEndNode.graph(), "After placing segment");
+ graph.getDebug().dump(DebugContext.DETAILED_LEVEL, graph, "After placing segment");
}
private static EndNode getBlockEnd(FixedNode node) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/ExceptionLivenessTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+package org.graalvm.compiler.nodes.test;
+
+import static org.graalvm.compiler.java.BytecodeParserOptions.InlineDuringParsing;
+
+import org.graalvm.compiler.core.phases.HighTier;
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.options.OptionValues;
+import org.junit.Test;
+
+public class ExceptionLivenessTest extends GraalCompilerTest {
+ @Test
+ public void testNewarray() {
+ OptionValues options = new OptionValues(getInitialOptions(), HighTier.Options.Inline, false, InlineDuringParsing, false);
+ test(options, "newarraySnippet");
+ }
+
+ public static int[] newarraySnippet() {
+ int[] array = new int[4];
+
+ dummy();
+ try {
+ array = new int[-10];
+ } catch (NegativeArraySizeException exc3) {
+ }
+ return array;
+ }
+
+ @BytecodeParserNeverInline
+ static void dummy() {
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/AbstractBeginNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/AbstractBeginNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
import org.graalvm.compiler.graph.IterableNodeType;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.graph.Position;
import org.graalvm.compiler.graph.iterators.NodeIterable;
import org.graalvm.compiler.nodeinfo.InputType;
import org.graalvm.compiler.nodeinfo.NodeInfo;
@@ -97,6 +98,19 @@
}
}
+ public boolean isUsedAsGuardInput() {
+ if (this.hasUsages()) {
+ for (Node n : usages()) {
+ for (Position inputPosition : n.inputPositions()) {
+ if (inputPosition.getInputType() == InputType.Guard && inputPosition.get(n) == this) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
public NodeIterable<GuardNode> guards() {
return usages().filter(GuardNode.class);
}
@@ -105,6 +119,10 @@
return usages();
}
+ public boolean hasAnchored() {
+ return this.hasUsages();
+ }
+
public NodeIterable<FixedNode> getBlockNodes() {
return new NodeIterable<FixedNode>() {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/IfNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/IfNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -548,7 +548,7 @@
return false;
}
- if (trueSuccessor().anchored().isNotEmpty() || falseSuccessor().anchored().isNotEmpty()) {
+ if (trueSuccessor().hasAnchored() || falseSuccessor().hasAnchored()) {
return false;
}
@@ -1216,6 +1216,10 @@
return false;
}
+ if (trueSuccessor().isUsedAsGuardInput() || falseSuccessor().isUsedAsGuardInput()) {
+ return false;
+ }
+
ValuePhiNode phi = (ValuePhiNode) generalPhi;
EconomicMap<Node, NodeColor> coloredNodes = EconomicMap.create(Equivalence.IDENTITY, 8);
@@ -1645,6 +1649,10 @@
return false;
}
+ if (trueSuccessor().isUsedAsGuardInput() || falseSuccessor().isUsedAsGuardInput()) {
+ return false;
+ }
+
// Ensure phi is used by at most the comparison and the merge's frame state (if any)
ValuePhiNode phi = (ValuePhiNode) singleUsage;
NodeIterable<Node> phiUsages = phi.usages();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AbsNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AbsNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_2;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Abs;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.CanonicalizerTool;
@@ -46,7 +48,7 @@
public static final NodeClass<AbsNode> TYPE = NodeClass.create(AbsNode.class);
public AbsNode(ValueNode x) {
- super(TYPE, ArithmeticOpTable::getAbs, x);
+ super(TYPE, getArithmeticOpTable(x).getAbs(), x);
}
public static ValueNode create(ValueNode value, NodeView view) {
@@ -67,6 +69,11 @@
}
@Override
+ protected UnaryOp<Abs> getOp(ArithmeticOpTable table) {
+ return table.getAbs();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
ValueNode ret = super.canonical(tool, forValue);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AddNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AddNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@
}
protected AddNode(NodeClass<? extends AddNode> c, ValueNode x, ValueNode y) {
- super(c, ArithmeticOpTable::getAdd, x, y);
+ super(c, getArithmeticOpTable(x).getAdd(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -71,6 +71,11 @@
}
}
+ @Override
+ protected BinaryOp<Add> getOp(ArithmeticOpTable table) {
+ return table.getAdd();
+ }
+
private static ValueNode canonical(AddNode addNode, BinaryOp<Add> op, ValueNode forX, ValueNode forY, NodeView view) {
AddNode self = addNode;
boolean associative = op.isAssociative();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AndNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/AndNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
public static final NodeClass<AndNode> TYPE = NodeClass.create(AndNode.class);
public AndNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getAnd, x, y);
+ super(TYPE, getArithmeticOpTable(x).getAnd(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -65,6 +65,11 @@
}
@Override
+ protected BinaryOp<And> getOp(ArithmeticOpTable table) {
+ return table.getAnd();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
ValueNode ret = super.canonical(tool, forX, forY);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/BinaryArithmeticNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/BinaryArithmeticNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,6 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
-import java.io.Serializable;
-import java.util.function.Function;
-
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp;
import org.graalvm.compiler.core.common.type.IntegerStamp;
@@ -58,20 +55,20 @@
@SuppressWarnings("rawtypes") public static final NodeClass<BinaryArithmeticNode> TYPE = NodeClass.create(BinaryArithmeticNode.class);
- protected interface SerializableBinaryFunction<T> extends Function<ArithmeticOpTable, BinaryOp<T>>, Serializable {
+ protected BinaryArithmeticNode(NodeClass<? extends BinaryArithmeticNode<OP>> c, BinaryOp<OP> opForStampComputation, ValueNode x, ValueNode y) {
+ super(c, opForStampComputation.foldStamp(x.stamp(NodeView.DEFAULT), y.stamp(NodeView.DEFAULT)), x, y);
}
- protected final SerializableBinaryFunction<OP> getOp;
-
- protected BinaryArithmeticNode(NodeClass<? extends BinaryArithmeticNode<OP>> c, SerializableBinaryFunction<OP> getOp, ValueNode x, ValueNode y) {
- super(c, getOp.apply(ArithmeticOpTable.forStamp(x.stamp(NodeView.DEFAULT))).foldStamp(x.stamp(NodeView.DEFAULT), y.stamp(NodeView.DEFAULT)), x, y);
- this.getOp = getOp;
+ public static ArithmeticOpTable getArithmeticOpTable(ValueNode forValue) {
+ return ArithmeticOpTable.forStamp(forValue.stamp(NodeView.DEFAULT));
}
+ protected abstract BinaryOp<OP> getOp(ArithmeticOpTable table);
+
protected final BinaryOp<OP> getOp(ValueNode forX, ValueNode forY) {
- ArithmeticOpTable table = ArithmeticOpTable.forStamp(forX.stamp(NodeView.DEFAULT));
- assert table.equals(ArithmeticOpTable.forStamp(forY.stamp(NodeView.DEFAULT)));
- return getOp.apply(table);
+ ArithmeticOpTable table = getArithmeticOpTable(forX);
+ assert table.equals(getArithmeticOpTable(forY));
+ return getOp(table);
}
@Override
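The BinaryArithmeticNode refactoring above drops the stored SerializableBinaryFunction and instead has each subclass resolve its BinaryOp from an ArithmeticOpTable on demand (AddNode and AndNode above show the concrete pattern). A distilled, standalone illustration of the shape of that change (plain Java placeholders, not the Graal classes):

    interface OpTable {
        BinOp getAdd();
        BinOp getAnd();
    }

    interface BinOp {
        long fold(long x, long y);
    }

    abstract class Binary {
        // New contract: no serializable Function<OpTable, BinOp> field; the op is
        // re-resolved from the operands' table whenever it is needed.
        protected abstract BinOp getOp(OpTable table);

        final long fold(OpTable table, long x, long y) {
            return getOp(table).fold(x, y);
        }
    }

    final class AddLike extends Binary {
        @Override
        protected BinOp getOp(OpTable table) {
            return table.getAdd();
        }
    }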
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/FloatConvertNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/FloatConvertNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,12 +25,12 @@
package org.graalvm.compiler.nodes.calc;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_8;
-
-import java.util.EnumMap;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.calc.FloatConvert;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.FloatConvertOp;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.CanonicalizerTool;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool;
@@ -55,16 +55,8 @@
protected final FloatConvert op;
- private static final EnumMap<FloatConvert, SerializableUnaryFunction<FloatConvertOp>> getOps;
- static {
- getOps = new EnumMap<>(FloatConvert.class);
- for (FloatConvert op : FloatConvert.values()) {
- getOps.put(op, table -> table.getFloatConvert(op));
- }
- }
-
public FloatConvertNode(FloatConvert op, ValueNode input) {
- super(TYPE, getOps.get(op), input);
+ super(TYPE, getArithmeticOpTable(input).getFloatConvert(op), input);
this.op = op;
}
@@ -76,6 +68,11 @@
return new FloatConvertNode(op, input);
}
+ @Override
+ protected UnaryOp<FloatConvertOp> getOp(ArithmeticOpTable table) {
+ return table.getFloatConvert(op);
+ }
+
public FloatConvert getFloatConvert() {
return op;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/FloatDivNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/FloatDivNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
}
protected FloatDivNode(NodeClass<? extends FloatDivNode> c, ValueNode x, ValueNode y) {
- super(c, ArithmeticOpTable::getDiv, x, y);
+ super(c, getArithmeticOpTable(x).getDiv(), x, y);
assert stamp instanceof FloatStamp;
}
@@ -67,6 +67,11 @@
}
@Override
+ protected BinaryOp<Div> getOp(ArithmeticOpTable table) {
+ return table.getDiv();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
ValueNode ret = super.canonical(tool, forX, forY);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerConvertNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerConvertNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -24,8 +24,7 @@
package org.graalvm.compiler.nodes.calc;
-import java.io.Serializable;
-import java.util.function.Function;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp;
@@ -53,20 +52,11 @@
public abstract class IntegerConvertNode<OP, REV> extends UnaryNode implements ArithmeticOperation, ConvertNode, ArithmeticLIRLowerable, StampInverter {
@SuppressWarnings("rawtypes") public static final NodeClass<IntegerConvertNode> TYPE = NodeClass.create(IntegerConvertNode.class);
- protected final SerializableIntegerConvertFunction<OP> getOp;
- protected final SerializableIntegerConvertFunction<REV> getReverseOp;
-
protected final int inputBits;
protected final int resultBits;
- protected interface SerializableIntegerConvertFunction<T> extends Function<ArithmeticOpTable, IntegerConvertOp<T>>, Serializable {
- }
-
- protected IntegerConvertNode(NodeClass<? extends IntegerConvertNode<OP, REV>> c, SerializableIntegerConvertFunction<OP> getOp, SerializableIntegerConvertFunction<REV> getReverseOp, int inputBits,
- int resultBits, ValueNode input) {
- super(c, getOp.apply(ArithmeticOpTable.forStamp(input.stamp(NodeView.DEFAULT))).foldStamp(inputBits, resultBits, input.stamp(NodeView.DEFAULT)), input);
- this.getOp = getOp;
- this.getReverseOp = getReverseOp;
+ protected IntegerConvertNode(NodeClass<? extends IntegerConvertNode<OP, REV>> c, IntegerConvertOp<OP> opForStampComputation, int inputBits, int resultBits, ValueNode input) {
+ super(c, opForStampComputation.foldStamp(inputBits, resultBits, input.stamp(NodeView.DEFAULT)), input);
this.inputBits = inputBits;
this.resultBits = resultBits;
assert PrimitiveStamp.getBits(input.stamp(NodeView.DEFAULT)) == 0 || PrimitiveStamp.getBits(input.stamp(NodeView.DEFAULT)) == inputBits;
@@ -80,13 +70,13 @@
return resultBits;
}
- protected final IntegerConvertOp<OP> getOp(ValueNode forValue) {
- return getOp.apply(ArithmeticOpTable.forStamp(forValue.stamp(NodeView.DEFAULT)));
- }
+ protected abstract IntegerConvertOp<OP> getOp(ArithmeticOpTable table);
+
+ protected abstract IntegerConvertOp<REV> getReverseOp(ArithmeticOpTable table);
@Override
public final IntegerConvertOp<OP> getArithmeticOp() {
- return getOp(getValue());
+ return getOp(getArithmeticOpTable(getValue()));
}
@Override
@@ -96,7 +86,7 @@
@Override
public Constant reverse(Constant c, ConstantReflectionProvider constantReflection) {
- IntegerConvertOp<REV> reverse = getReverseOp.apply(ArithmeticOpTable.forStamp(stamp(NodeView.DEFAULT)));
+ IntegerConvertOp<REV> reverse = getReverseOp(ArithmeticOpTable.forStamp(stamp(NodeView.DEFAULT)));
return reverse.foldConstant(getResultBits(), getInputBits(), c);
}
@@ -108,7 +98,7 @@
@Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
- ValueNode synonym = findSynonym(getOp(forValue), forValue, inputBits, resultBits, stamp(NodeView.DEFAULT));
+ ValueNode synonym = findSynonym(getOp(getArithmeticOpTable(forValue)), forValue, inputBits, resultBits, stamp(NodeView.DEFAULT));
if (synonym != null) {
return synonym;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerMulHighNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerMulHighNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -28,6 +28,7 @@
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_2;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.MulHigh;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Canonicalizable;
@@ -48,7 +49,12 @@
public static final NodeClass<IntegerMulHighNode> TYPE = NodeClass.create(IntegerMulHighNode.class);
public IntegerMulHighNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getMulHigh, x, y);
+ super(TYPE, getArithmeticOpTable(x).getMulHigh(), x, y);
+ }
+
+ @Override
+ protected BinaryOp<MulHigh> getOp(ArithmeticOpTable table) {
+ return table.getMulHigh();
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/LeftShiftNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/LeftShiftNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,10 @@
package org.graalvm.compiler.nodes.calc;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
+
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp.Shl;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
@@ -45,7 +48,7 @@
public static final NodeClass<LeftShiftNode> TYPE = NodeClass.create(LeftShiftNode.class);
public LeftShiftNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getShl, x, y);
+ super(TYPE, getArithmeticOpTable(x).getShl(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -60,6 +63,11 @@
}
@Override
+ protected ShiftOp<Shl> getOp(ArithmeticOpTable table) {
+ return table.getShl();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
ValueNode ret = super.canonical(tool, forX, forY);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/MulNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/MulNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,9 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_2;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
-import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.Mul;
+import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Canonicalizable.BinaryCommutative;
@@ -56,7 +56,7 @@
}
protected MulNode(NodeClass<? extends MulNode> c, ValueNode x, ValueNode y) {
- super(c, ArithmeticOpTable::getMul, x, y);
+ super(c, getArithmeticOpTable(x).getMul(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -70,6 +70,11 @@
}
@Override
+ protected BinaryOp<Mul> getOp(ArithmeticOpTable table) {
+ return table.getMul();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
ValueNode ret = super.canonical(tool, forX, forY);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NarrowNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NarrowNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
package org.graalvm.compiler.nodes.calc;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
@@ -58,7 +59,7 @@
}
public NarrowNode(ValueNode input, int inputBits, int resultBits) {
- super(TYPE, ArithmeticOpTable::getNarrow, ArithmeticOpTable::getSignExtend, inputBits, resultBits, input);
+ super(TYPE, getArithmeticOpTable(input).getNarrow(), inputBits, resultBits, input);
}
public static ValueNode create(ValueNode input, int resultBits, NodeView view) {
@@ -76,6 +77,16 @@
}
@Override
+ protected IntegerConvertOp<Narrow> getOp(ArithmeticOpTable table) {
+ return table.getNarrow();
+ }
+
+ @Override
+ protected IntegerConvertOp<SignExtend> getReverseOp(ArithmeticOpTable table) {
+ return table.getSignExtend();
+ }
+
+ @Override
public boolean isLossless() {
return checkLossless(this.getResultBits());
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NegateNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NegateNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_2;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Neg;
import org.graalvm.compiler.core.common.type.FloatStamp;
import org.graalvm.compiler.core.common.type.Stamp;
@@ -49,7 +51,7 @@
public static final NodeClass<NegateNode> TYPE = NodeClass.create(NegateNode.class);
public NegateNode(ValueNode value) {
- super(TYPE, ArithmeticOpTable::getNeg, value);
+ super(TYPE, getArithmeticOpTable(value).getNeg(), value);
}
public static ValueNode create(ValueNode value, NodeView view) {
@@ -61,6 +63,11 @@
}
@Override
+ protected UnaryOp<Neg> getOp(ArithmeticOpTable table) {
+ return table.getNeg();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
ValueNode synonym = findSynonym(forValue, getOp(forValue));
if (synonym != null) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NotNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/NotNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Not;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.graph.NodeClass;
@@ -48,7 +50,7 @@
public static final NodeClass<NotNode> TYPE = NodeClass.create(NotNode.class);
protected NotNode(ValueNode x) {
- super(TYPE, ArithmeticOpTable::getNot, x);
+ super(TYPE, getArithmeticOpTable(x).getNot(), x);
}
public static ValueNode create(ValueNode x) {
@@ -56,6 +58,11 @@
}
@Override
+ protected UnaryOp<Not> getOp(ArithmeticOpTable table) {
+ return table.getNot();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
ValueNode ret = super.canonical(tool, forValue);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ObjectEqualsNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ObjectEqualsNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,13 @@
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.extended.BoxNode;
import org.graalvm.compiler.nodes.extended.GetClassNode;
+import org.graalvm.compiler.nodes.java.AbstractNewObjectNode;
import org.graalvm.compiler.nodes.java.InstanceOfNode;
import org.graalvm.compiler.nodes.spi.Virtualizable;
import org.graalvm.compiler.nodes.spi.VirtualizerTool;
+import org.graalvm.compiler.nodes.virtual.AllocatedObjectNode;
import org.graalvm.compiler.nodes.virtual.VirtualBoxingNode;
import org.graalvm.compiler.nodes.virtual.VirtualObjectNode;
import org.graalvm.compiler.options.OptionValues;
@@ -112,6 +115,11 @@
}
return LogicConstantNode.forBoolean(false);
}
+ if (nonConstant instanceof AbstractNewObjectNode || nonConstant instanceof AllocatedObjectNode) {
+ assert !(nonConstant instanceof BoxNode); // guard against class hierarchy changes
+ // a constant can never be equal to a new object
+ return LogicConstantNode.forBoolean(false);
+ }
return super.canonicalizeSymmetricConstant(constantReflection, metaAccess, options, smallestCompareWidth, condition, constant, nonConstant, mirrored, unorderedIsTrue, view);
}
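
For context, the new canonicalization folds reference comparisons between a compile-time constant and a freshly allocated (or materialized virtual) object. A self-contained illustration of the kind of code this affects; the class and field names are illustrative, not from the patch:

    final class ConstantVsNewObject {
        static final Object SENTINEL = new Object();

        static boolean neverTrue() {
            // A newly allocated object cannot be reference-equal to any pre-existing constant,
            // so the comparison can be canonicalized to false.
            return new Object() == SENTINEL;
        }
    }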
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/OrNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/OrNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
public static final NodeClass<OrNode> TYPE = NodeClass.create(OrNode.class);
public OrNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getOr, x, y);
+ super(TYPE, getArithmeticOpTable(x).getOr(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -64,6 +64,11 @@
}
@Override
+ protected BinaryOp<Or> getOp(ArithmeticOpTable table) {
+ return table.getOr();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
NodeView view = NodeView.from(tool);
ValueNode ret = super.canonical(tool, forX, forY);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/RemNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/RemNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
}
protected RemNode(NodeClass<? extends RemNode> c, ValueNode x, ValueNode y) {
- super(c, ArithmeticOpTable::getRem, x, y);
+ super(c, getArithmeticOpTable(x).getRem(), x, y);
}
public static ValueNode create(ValueNode forX, ValueNode forY, NodeView view) {
@@ -64,6 +64,11 @@
}
@Override
+ protected BinaryOp<Rem> getOp(ArithmeticOpTable table) {
+ return table.getRem();
+ }
+
+ @Override
public void lower(LoweringTool tool) {
tool.getLowerer().lower(this, tool);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/RightShiftNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/RightShiftNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,10 @@
package org.graalvm.compiler.nodes.calc;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
+
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp.Shr;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
@@ -45,7 +48,7 @@
public static final NodeClass<RightShiftNode> TYPE = NodeClass.create(RightShiftNode.class);
public RightShiftNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getShr, x, y);
+ super(TYPE, getArithmeticOpTable(x).getShr(), x, y);
}
public static ValueNode create(ValueNode x, int y, NodeView view) {
@@ -67,6 +70,11 @@
}
@Override
+ protected ShiftOp<Shr> getOp(ArithmeticOpTable table) {
+ return table.getShr();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
NodeView view = NodeView.from(tool);
ValueNode ret = super.canonical(tool, forX, forY);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ShiftNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ShiftNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,7 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
-
-import java.io.Serializable;
-import java.util.function.Function;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp;
@@ -55,25 +53,21 @@
@SuppressWarnings("rawtypes") public static final NodeClass<ShiftNode> TYPE = NodeClass.create(ShiftNode.class);
- protected interface SerializableShiftFunction<T> extends Function<ArithmeticOpTable, ShiftOp<T>>, Serializable {
- }
-
- protected final SerializableShiftFunction<OP> getOp;
-
/**
* Creates a new shift operation.
*
* @param x the first input value
* @param s the second input value
*/
- protected ShiftNode(NodeClass<? extends ShiftNode<OP>> c, SerializableShiftFunction<OP> getOp, ValueNode x, ValueNode s) {
- super(c, getOp.apply(ArithmeticOpTable.forStamp(x.stamp(NodeView.DEFAULT))).foldStamp(x.stamp(NodeView.DEFAULT), (IntegerStamp) s.stamp(NodeView.DEFAULT)), x, s);
+ protected ShiftNode(NodeClass<? extends ShiftNode<OP>> c, ShiftOp<OP> opForStampComputation, ValueNode x, ValueNode s) {
+ super(c, opForStampComputation.foldStamp(x.stamp(NodeView.DEFAULT), (IntegerStamp) s.stamp(NodeView.DEFAULT)), x, s);
assert ((IntegerStamp) s.stamp(NodeView.DEFAULT)).getBits() == 32;
- this.getOp = getOp;
}
+ protected abstract ShiftOp<OP> getOp(ArithmeticOpTable table);
+
protected final ShiftOp<OP> getOp(ValueNode forValue) {
- return getOp.apply(ArithmeticOpTable.forStamp(forValue.stamp(NodeView.DEFAULT)));
+ return getOp(getArithmeticOpTable(forValue));
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignExtendNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignExtendNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
package org.graalvm.compiler.nodes.calc;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
-import jdk.vm.ci.code.CodeUtil;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp.Narrow;
@@ -42,6 +42,8 @@
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
+import jdk.vm.ci.code.CodeUtil;
+
/**
* The {@code SignExtendNode} converts an integer to a wider integer using sign extension.
*/
@@ -56,7 +58,7 @@
}
public SignExtendNode(ValueNode input, int inputBits, int resultBits) {
- super(TYPE, ArithmeticOpTable::getSignExtend, ArithmeticOpTable::getNarrow, inputBits, resultBits, input);
+ super(TYPE, getArithmeticOpTable(input).getSignExtend(), inputBits, resultBits, input);
}
public static ValueNode create(ValueNode input, int resultBits, NodeView view) {
@@ -73,6 +75,16 @@
}
@Override
+ protected IntegerConvertOp<SignExtend> getOp(ArithmeticOpTable table) {
+ return table.getSignExtend();
+ }
+
+ @Override
+ protected IntegerConvertOp<Narrow> getReverseOp(ArithmeticOpTable table) {
+ return table.getNarrow();
+ }
+
+ @Override
public boolean isLossless() {
return true;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SqrtNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SqrtNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_16;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp.Sqrt;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool;
@@ -47,7 +49,7 @@
public static final NodeClass<SqrtNode> TYPE = NodeClass.create(SqrtNode.class);
protected SqrtNode(ValueNode x) {
- super(TYPE, ArithmeticOpTable::getSqrt, x);
+ super(TYPE, getArithmeticOpTable(x).getSqrt(), x);
}
public static ValueNode create(ValueNode x, NodeView view) {
@@ -59,6 +61,11 @@
}
@Override
+ protected UnaryOp<Sqrt> getOp(ArithmeticOpTable table) {
+ return table.getSqrt();
+ }
+
+ @Override
public void generate(NodeLIRBuilderTool nodeValueMap, ArithmeticLIRGeneratorTool gen) {
nodeValueMap.setResult(this, gen.emitMathSqrt(nodeValueMap.operand(getValue())));
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SubNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SubNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
}
protected SubNode(NodeClass<? extends SubNode> c, ValueNode x, ValueNode y) {
- super(c, ArithmeticOpTable::getSub, x, y);
+ super(c, getArithmeticOpTable(x).getSub(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -66,6 +66,11 @@
return canonical(null, op, stamp, x, y, view);
}
+ @Override
+ protected BinaryOp<Sub> getOp(ArithmeticOpTable table) {
+ return table.getSub();
+ }
+
private static ValueNode canonical(SubNode subNode, BinaryOp<Sub> op, Stamp stamp, ValueNode forX, ValueNode forY, NodeView view) {
SubNode self = subNode;
if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/UnaryArithmeticNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/UnaryArithmeticNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,7 @@
package org.graalvm.compiler.nodes.calc;
-import java.io.Serializable;
-import java.util.function.Function;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
@@ -44,18 +43,14 @@
@SuppressWarnings("rawtypes") public static final NodeClass<UnaryArithmeticNode> TYPE = NodeClass.create(UnaryArithmeticNode.class);
- protected interface SerializableUnaryFunction<T> extends Function<ArithmeticOpTable, UnaryOp<T>>, Serializable {
+ protected UnaryArithmeticNode(NodeClass<? extends UnaryArithmeticNode<OP>> c, UnaryOp<OP> opForStampComputation, ValueNode value) {
+ super(c, opForStampComputation.foldStamp(value.stamp(NodeView.DEFAULT)), value);
}
- protected final SerializableUnaryFunction<OP> getOp;
-
- protected UnaryArithmeticNode(NodeClass<? extends UnaryArithmeticNode<OP>> c, SerializableUnaryFunction<OP> getOp, ValueNode value) {
- super(c, getOp.apply(ArithmeticOpTable.forStamp(value.stamp(NodeView.DEFAULT))).foldStamp(value.stamp(NodeView.DEFAULT)), value);
- this.getOp = getOp;
- }
+ protected abstract UnaryOp<OP> getOp(ArithmeticOpTable table);
protected final UnaryOp<OP> getOp(ValueNode forValue) {
- return getOp.apply(ArithmeticOpTable.forStamp(forValue.stamp(NodeView.DEFAULT)));
+ return getOp(getArithmeticOpTable(forValue));
}
@Override
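
The recurring refactoring in these node classes drops the stored SerializableUnaryFunction/SerializableShiftFunction/SerializableIntegerConvertFunction fields in favor of an abstract getOp(ArithmeticOpTable) method that each concrete node overrides. A self-contained toy sketch of that design change, using illustrative names rather than Graal API, on the assumption that the point is to avoid carrying a (serializable) lambda field on every node instance:

    import java.util.function.Function;

    interface OpTable { String neg(); }

    // Before: each node instance stores a function used only to re-derive its op from a table.
    class StoredLambdaNode {
        final Function<OpTable, String> getOp;
        StoredLambdaNode(Function<OpTable, String> getOp) { this.getOp = getOp; }
        String op(OpTable table) { return getOp.apply(table); }
    }

    // After: the lookup is a virtual method, so no per-instance field is needed at all.
    abstract class VirtualLookupNode {
        abstract String getOp(OpTable table);
        String op(OpTable table) { return getOp(table); }
    }

    class NegNodeSketch extends VirtualLookupNode {
        @Override String getOp(OpTable table) { return table.neg(); }
    }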
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/UnsignedRightShiftNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/UnsignedRightShiftNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -24,8 +24,10 @@
package org.graalvm.compiler.nodes.calc;
-import jdk.vm.ci.code.CodeUtil;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
+
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.ShiftOp.UShr;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
@@ -38,6 +40,7 @@
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
+import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.meta.JavaKind;
@NodeInfo(shortName = ">>>")
@@ -46,7 +49,7 @@
public static final NodeClass<UnsignedRightShiftNode> TYPE = NodeClass.create(UnsignedRightShiftNode.class);
public UnsignedRightShiftNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getUShr, x, y);
+ super(TYPE, getArithmeticOpTable(x).getUShr(), x, y);
}
public static ValueNode create(ValueNode x, ValueNode y, NodeView view) {
@@ -61,6 +64,11 @@
}
@Override
+ protected ShiftOp<UShr> getOp(ArithmeticOpTable table) {
+ return table.getUShr();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
NodeView view = NodeView.from(tool);
ValueNode ret = super.canonical(tool, forX, forY);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/XorNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/XorNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
public static final NodeClass<XorNode> TYPE = NodeClass.create(XorNode.class);
public XorNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getXor, x, y);
+ super(TYPE, getArithmeticOpTable(x).getXor(), x, y);
assert x.stamp(NodeView.DEFAULT).isCompatible(y.stamp(NodeView.DEFAULT));
}
@@ -65,6 +65,11 @@
}
@Override
+ protected BinaryOp<Xor> getOp(ArithmeticOpTable table) {
+ return table.getXor();
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
ValueNode ret = super.canonical(tool, forX, forY);
if (ret != this) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ZeroExtendNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ZeroExtendNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
package org.graalvm.compiler.nodes.calc;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
@@ -60,7 +61,7 @@
}
public ZeroExtendNode(ValueNode input, int inputBits, int resultBits, boolean inputAlwaysPositive) {
- super(TYPE, ArithmeticOpTable::getZeroExtend, ArithmeticOpTable::getNarrow, inputBits, resultBits, input);
+ super(TYPE, getArithmeticOpTable(input).getZeroExtend(), inputBits, resultBits, input);
this.inputAlwaysPositive = inputAlwaysPositive;
}
@@ -82,6 +83,16 @@
}
@Override
+ protected IntegerConvertOp<ZeroExtend> getOp(ArithmeticOpTable table) {
+ return table.getZeroExtend();
+ }
+
+ @Override
+ protected IntegerConvertOp<Narrow> getReverseOp(ArithmeticOpTable table) {
+ return table.getNarrow();
+ }
+
+ @Override
public boolean isLossless() {
return true;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/IntegerSwitchNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/IntegerSwitchNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -429,11 +429,14 @@
}
/*
- * Collect dead successors. Successors have to be cleaned before adding the new node to the
- * graph.
+ * Surviving successors have to be cleaned before adding the new node to the graph. Keep the
+ * dead ones attached to the old node for later cleanup.
*/
- List<AbstractBeginNode> deadSuccessors = successors.filter(s -> !newSuccessors.contains(s)).snapshot();
- successors.clear();
+ for (int i = 0; i < successors.size(); i++) {
+ if (newSuccessors.contains(successors.get(i))) {
+ successors.set(i, null);
+ }
+ }
/*
* Create the new switch node. This is done before removing dead successors as `killCFG`
@@ -443,14 +446,11 @@
AbstractBeginNode[] successorsArray = newSuccessors.toArray(new AbstractBeginNode[newSuccessors.size()]);
SwitchNode newSwitch = graph().add(new IntegerSwitchNode(newValue, successorsArray, newKeys, newKeyProbabilities, newKeySuccessors));
- /* Remove dead successors. */
- for (AbstractBeginNode successor : deadSuccessors) {
- GraphUtil.killCFG(successor);
- }
-
/* Replace ourselves with the new switch */
((FixedWithNextNode) predecessor()).setNext(newSwitch);
- GraphUtil.killWithUnusedFloatingInputs(this);
+
+ // Remove the old switch and the dead successors.
+ GraphUtil.killCFG(this);
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/spi/LoweringProvider.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/spi/LoweringProvider.java Fri Oct 11 12:08:01 2019 +0530
@@ -53,8 +53,7 @@
Integer smallestCompareWidth();
/**
- * Returns the granularity in terms of bytes that this target platform's bulk zeroing supports.
- * Returns 0 to indicate that this target platform does not support bulk zeroing instruction.
+ * Indicates whether this target platform supports bulk zeroing of arbitrary size.
*/
- int bulkZeroingStride();
+ boolean supportsBulkZeroing();
}
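
A minimal sketch of the caller-side effect of this API change; the helper class is hypothetical and only illustrates the query flipping from a stride value to a boolean capability:

    import org.graalvm.compiler.nodes.spi.LoweringProvider;

    final class BulkZeroQuery {
        static boolean canUseBulkZero(LoweringProvider lowerer) {
            // was: lowerer.bulkZeroingStride() > 0
            return lowerer.supportsBulkZeroing();
        }
    }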
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/util/GraphUtil.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/util/GraphUtil.java Fri Oct 11 12:08:01 2019 +0530
@@ -742,19 +742,22 @@
/**
* Tries to find an original value of the given node by traversing through proxies and
- * unambiguous phis. Note that this method will perform an exhaustive search through phis. It is
- * intended to be used during graph building, when phi nodes aren't yet canonicalized.
+ * unambiguous phis. Note that this method will perform an exhaustive search through phis.
*
- * @param value The node whose original value should be determined.
- * @return The original value (which might be the input value itself).
+ * @param value the node whose original value should be determined
+ * @param abortOnLoopPhi specifies if the traversal through phis should stop and return
+ * {@code value} if it hits a {@linkplain PhiNode#isLoopPhi loop phi}. This argument
+ * must be {@code true} if used during graph building as loop phi nodes may not yet
+ * have all their inputs computed.
+ * @return the original value (which might be {@code value} itself)
*/
- public static ValueNode originalValue(ValueNode value) {
- ValueNode result = originalValueSimple(value);
+ public static ValueNode originalValue(ValueNode value, boolean abortOnLoopPhi) {
+ ValueNode result = originalValueSimple(value, abortOnLoopPhi);
assert result != null;
return result;
}
- private static ValueNode originalValueSimple(ValueNode value) {
+ private static ValueNode originalValueSimple(ValueNode value, boolean abortOnLoopPhi) {
/* The very simple case: look through proxies. */
ValueNode cur = originalValueForProxy(value);
@@ -765,6 +768,10 @@
*/
PhiNode phi = (PhiNode) cur;
+ if (abortOnLoopPhi && phi.isLoopPhi()) {
+ return value;
+ }
+
ValueNode phiSingleValue = null;
int count = phi.valueCount();
for (int i = 0; i < count; ++i) {
@@ -783,7 +790,7 @@
* of the inputs is another phi function. We need to do a complicated
* exhaustive check.
*/
- return originalValueForComplicatedPhi(phi, new NodeBitMap(value.graph()));
+ return originalValueForComplicatedPhi(value, phi, new NodeBitMap(value.graph()), abortOnLoopPhi);
} else {
/*
* We have two different input values for the phi function, but none of them
@@ -819,8 +826,12 @@
/**
* Handling for complicated nestings of phi functions. We need to reduce phi functions
* recursively, and need a temporary map of visited nodes to avoid endless recursion of cycles.
+ *
+ * @param value the node whose original value is being determined
+ * @param abortOnLoopPhi specifies if the traversal through phis should stop and return
+ * {@code value} if it hits a {@linkplain PhiNode#isLoopPhi loop phi}
*/
- private static ValueNode originalValueForComplicatedPhi(PhiNode phi, NodeBitMap visited) {
+ private static ValueNode originalValueForComplicatedPhi(ValueNode value, PhiNode phi, NodeBitMap visited, boolean abortOnLoopPhi) {
if (visited.isMarked(phi)) {
/*
* Found a phi function that was already seen. Either a cycle, or just a second phi
@@ -836,7 +847,16 @@
ValueNode phiCurValue = originalValueForProxy(phi.valueAt(i));
if (phiCurValue instanceof PhiNode) {
/* Recursively process a phi function input. */
- phiCurValue = originalValueForComplicatedPhi((PhiNode) phiCurValue, visited);
+ PhiNode curPhi = (PhiNode) phiCurValue;
+ if (abortOnLoopPhi && curPhi.isLoopPhi()) {
+ return value;
+ }
+ phiCurValue = originalValueForComplicatedPhi(value, curPhi, visited, abortOnLoopPhi);
+ if (phiCurValue == value) {
+ // Hit a loop phi
+ assert abortOnLoopPhi;
+ return value;
+ }
}
if (phiCurValue == null) {
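
A short sketch of the two ways the new flag is meant to be used, based on the javadoc above; the wrapper class is illustrative:

    import org.graalvm.compiler.nodes.ValueNode;
    import org.graalvm.compiler.nodes.util.GraphUtil;

    final class OriginalValueUsage {
        static ValueNode duringGraphBuilding(ValueNode v) {
            // Loop phis may not have all their inputs yet, so abort on them and return v itself.
            return GraphUtil.originalValue(v, true);
        }

        static ValueNode onCompleteGraph(ValueNode v) {
            // Once the graph is fully built, an exhaustive search through loop phis is permitted.
            return GraphUtil.originalValue(v, false);
        }
    }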
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64GraphBuilderPlugins.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64GraphBuilderPlugins.java Fri Oct 11 12:08:01 2019 +0530
@@ -47,6 +47,7 @@
import org.graalvm.compiler.nodes.memory.address.OffsetAddressNode;
import org.graalvm.compiler.replacements.TargetGraphBuilderPlugins;
import org.graalvm.compiler.replacements.nodes.BinaryMathIntrinsicNode;
+import org.graalvm.compiler.replacements.nodes.FusedMultiplyAddNode;
import org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode;
import org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode.UnaryOperation;
import org.graalvm.compiler.serviceprovider.JavaVersionUtil;
@@ -61,16 +62,11 @@
@Override
public void register(Plugins plugins, BytecodeProvider replacementsBytecodeProvider, Architecture arch, boolean explicitUnsafeNullChecks, boolean registerMathPlugins,
boolean emitJDK9StringSubstitutions, boolean useFMAIntrinsics) {
- register(plugins, replacementsBytecodeProvider, explicitUnsafeNullChecks, registerMathPlugins, emitJDK9StringSubstitutions);
+ register(plugins, replacementsBytecodeProvider, explicitUnsafeNullChecks, registerMathPlugins, emitJDK9StringSubstitutions, useFMAIntrinsics);
}
public static void register(Plugins plugins, BytecodeProvider bytecodeProvider, boolean explicitUnsafeNullChecks,
- boolean registerMathPlugins) {
- register(plugins, bytecodeProvider, explicitUnsafeNullChecks, registerMathPlugins, true);
- }
-
- public static void register(Plugins plugins, BytecodeProvider bytecodeProvider, boolean explicitUnsafeNullChecks,
- boolean registerMathPlugins, boolean emitJDK9StringSubstitutions) {
+ boolean registerMathPlugins, boolean emitJDK9StringSubstitutions, boolean useFMAIntrinsics) {
InvocationPlugins invocationPlugins = plugins.getInvocationPlugins();
invocationPlugins.defer(new Runnable() {
@Override
@@ -78,7 +74,7 @@
registerIntegerLongPlugins(invocationPlugins, JavaKind.Int, bytecodeProvider);
registerIntegerLongPlugins(invocationPlugins, JavaKind.Long, bytecodeProvider);
if (registerMathPlugins) {
- registerMathPlugins(invocationPlugins);
+ registerMathPlugins(invocationPlugins, useFMAIntrinsics);
}
if (emitJDK9StringSubstitutions) {
registerStringLatin1Plugins(invocationPlugins, bytecodeProvider);
@@ -130,7 +126,7 @@
});
}
- private static void registerMathPlugins(InvocationPlugins plugins) {
+ private static void registerMathPlugins(InvocationPlugins plugins, boolean useFMAIntrinsics) {
Registration r = new Registration(plugins, Math.class);
registerUnaryMath(r, "sin", SIN);
registerUnaryMath(r, "cos", COS);
@@ -148,6 +144,36 @@
registerRound(r, "rint", RoundingMode.NEAREST);
registerRound(r, "ceil", RoundingMode.UP);
registerRound(r, "floor", RoundingMode.DOWN);
+ if (useFMAIntrinsics && JavaVersionUtil.JAVA_SPEC > 8) {
+ registerFMA(r);
+ }
+ }
+
+ private static void registerFMA(Registration r) {
+ r.register3("fma", Double.TYPE, Double.TYPE, Double.TYPE, new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b,
+ ResolvedJavaMethod targetMethod,
+ Receiver receiver,
+ ValueNode na,
+ ValueNode nb,
+ ValueNode nc) {
+ b.push(JavaKind.Double, b.append(new FusedMultiplyAddNode(na, nb, nc)));
+ return true;
+ }
+ });
+ r.register3("fma", Float.TYPE, Float.TYPE, Float.TYPE, new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b,
+ ResolvedJavaMethod targetMethod,
+ Receiver receiver,
+ ValueNode na,
+ ValueNode nb,
+ ValueNode nc) {
+ b.push(JavaKind.Float, b.append(new FusedMultiplyAddNode(na, nb, nc)));
+ return true;
+ }
+ });
}
private static void registerUnaryMath(Registration r, String name, UnaryOperation operation) {
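
For reference, the kind of call the new plugin intrinsifies (the wrapper method is illustrative; Math.fma is the real entry point):

    static double fusedMultiplyAdd(double a, double b, double c) {
        // With useFMAIntrinsics enabled on JDK 9+, this call is replaced by a FusedMultiplyAddNode,
        // which is expected to lower to a single fused multiply-add instruction on AArch64.
        return Math.fma(a, b, c);
    }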
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64FloatConvertNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64FloatConvertNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,12 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_8;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import static org.graalvm.compiler.nodes.calc.BinaryArithmeticNode.getArithmeticOpTable;
-import jdk.vm.ci.meta.JavaConstant;
import org.graalvm.compiler.core.common.calc.FloatConvert;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.FloatConvertOp;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.UnaryOp;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
@@ -43,6 +45,8 @@
import org.graalvm.compiler.nodes.spi.ArithmeticLIRLowerable;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
+import jdk.vm.ci.meta.JavaConstant;
+
/**
* This node has the semantics of the AMD64 floating point conversions. It is used in the lowering
* of the {@link FloatConvertNode} which, on AMD64 needs a {@link AMD64FloatConvertNode} plus some
@@ -58,12 +62,17 @@
protected final FloatConvert op;
public AMD64FloatConvertNode(FloatConvert op, ValueNode value) {
- super(TYPE, table -> table.getFloatConvert(op), value);
+ super(TYPE, getArithmeticOpTable(value).getFloatConvert(op), value);
this.op = op;
this.stamp = this.stamp.meet(createInexactCaseStamp());
}
@Override
+ protected UnaryOp<FloatConvertOp> getOp(ArithmeticOpTable table) {
+ return table.getFloatConvert(op);
+ }
+
+ @Override
public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
// nothing to do
return this;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/classfile/ClassfileBytecodeProviderTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/classfile/ClassfileBytecodeProviderTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -87,13 +87,6 @@
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
-import org.graalvm.compiler.test.ModuleSupport;
-import org.graalvm.compiler.test.SubprocessUtil;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-
import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.api.test.Graal;
import org.graalvm.compiler.bytecode.Bytecode;
@@ -111,6 +104,12 @@
import org.graalvm.compiler.replacements.classfile.ClassfileBytecodeProvider;
import org.graalvm.compiler.runtime.RuntimeProvider;
import org.graalvm.compiler.serviceprovider.JavaVersionUtil;
+import org.graalvm.compiler.test.ModuleSupport;
+import org.graalvm.compiler.test.SubprocessUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
import jdk.vm.ci.meta.ConstantPool;
import jdk.vm.ci.meta.JavaField;
@@ -209,6 +208,17 @@
}
try {
checkClass(metaAccess, getSnippetReflection(), className);
+ } catch (UnsupportedClassVersionError e) {
+ // graal-test.jar can contain classes compiled for different
+ // Java versions
+ } catch (NoClassDefFoundError e) {
+ if (!e.getMessage().contains("Could not initialize class")) {
+ throw e;
+ } else {
+ // A second or later attempt to initialize a class
+ // results in this confusing error where the
+ // original cause of initialization failure is lost
+ }
} catch (ClassNotFoundException e) {
throw new AssertionError(e);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Fri Oct 11 12:08:01 2019 +0530
@@ -835,7 +835,7 @@
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
ValueNode object = receiver.get();
- ValueNode folded = GetClassNode.tryFold(b.getMetaAccess(), b.getConstantReflection(), NodeView.DEFAULT, GraphUtil.originalValue(object));
+ ValueNode folded = GetClassNode.tryFold(b.getMetaAccess(), b.getConstantReflection(), NodeView.DEFAULT, GraphUtil.originalValue(object, true));
if (folded != null) {
b.addPush(JavaKind.Object, folded);
} else {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/ZeroMemoryNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/ZeroMemoryNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -48,19 +48,21 @@
public static final NodeClass<ZeroMemoryNode> TYPE = NodeClass.create(ZeroMemoryNode.class);
@Input ValueNode length;
+ private final boolean isAligned;
- public ZeroMemoryNode(ValueNode address, ValueNode length, LocationIdentity locationIdentity) {
- this(OffsetAddressNode.create(address), length, locationIdentity, BarrierType.NONE);
+ public ZeroMemoryNode(ValueNode address, ValueNode length, boolean isAligned, LocationIdentity locationIdentity) {
+ this(OffsetAddressNode.create(address), length, isAligned, locationIdentity, BarrierType.NONE);
}
- public ZeroMemoryNode(AddressNode address, ValueNode length, LocationIdentity locationIdentity, BarrierType type) {
+ public ZeroMemoryNode(AddressNode address, ValueNode length, boolean isAligned, LocationIdentity locationIdentity, BarrierType type) {
super(TYPE, address, locationIdentity, StampFactory.forVoid(), type);
this.length = length;
+ this.isAligned = isAligned;
}
@Override
public void generate(NodeLIRBuilderTool gen) {
- gen.getLIRGeneratorTool().emitZeroMemory(gen.operand(getAddress()), gen.operand(length));
+ gen.getLIRGeneratorTool().emitZeroMemory(gen.operand(getAddress()), gen.operand(length), isAligned);
}
@Override
@@ -69,5 +71,5 @@
}
@NodeIntrinsic
- public static native void zero(Word address, long length, @ConstantNodeParameter LocationIdentity locationIdentity);
+ public static native void zero(Word address, long length, @ConstantNodeParameter boolean isAligned, @ConstantNodeParameter LocationIdentity locationIdentity);
}
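
A hedged sketch of the updated node intrinsic as it might appear inside a snippet; the snippet class, the alignment guarantee, and the chosen location identity are assumptions of this example, not part of the patch:

    import org.graalvm.compiler.replacements.nodes.ZeroMemoryNode;
    import org.graalvm.compiler.word.Word;
    import org.graalvm.word.LocationIdentity;

    final class ZeroSnippetSketch {
        static void zeroAlignedRange(Word base, long size) {
            // The new boolean tells the backend that the caller guarantees an aligned range.
            ZeroMemoryNode.zero(base, size, true, LocationIdentity.any());
        }
    }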
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/arithmetic/UnsignedMulHighNode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/arithmetic/UnsignedMulHighNode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_2;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
+import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp.UMulHigh;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Canonicalizable;
@@ -49,7 +50,12 @@
public static final NodeClass<UnsignedMulHighNode> TYPE = NodeClass.create(UnsignedMulHighNode.class);
public UnsignedMulHighNode(ValueNode x, ValueNode y) {
- super(TYPE, ArithmeticOpTable::getUMulHigh, x, y);
+ super(TYPE, getArithmeticOpTable(x).getUMulHigh(), x, y);
+ }
+
+ @Override
+ protected BinaryOp<UMulHigh> getOp(ArithmeticOpTable table) {
+ return table.getUMulHigh();
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/DefaultGraphTypes.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/DefaultGraphTypes.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,17 @@
private DefaultGraphTypes() {
}
+ @SuppressWarnings("unchecked")
@Override
public Class<?> enumClass(Object enumValue) {
if (enumValue instanceof Enum<?>) {
- return enumValue.getClass();
+ // check that the enum class is not actually an anonymous subclass:
+ Class<? extends Enum<?>> enumClass = (Class<? extends Enum<?>>) enumValue.getClass();
+ Enum<?>[] constants = enumClass.getEnumConstants();
+ if (constants == null && enumClass.isAnonymousClass()) {
+ enumClass = (Class<? extends Enum<?>>) enumClass.getSuperclass();
+ }
+ return enumClass;
}
return null;
}
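
The case the superclass fallback handles: an enum constant with a class body compiles to an anonymous subclass. A self-contained illustration:

    enum Shape {
        CIRCLE { @Override String label() { return "round"; } },
        SQUARE { @Override String label() { return "square"; } };

        abstract String label();
    }
    // Shape.CIRCLE.getClass() is an anonymous class: getEnumConstants() returns null and
    // isAnonymousClass() is true; its getSuperclass() is Shape.class, which is what the
    // graph type interface should report as the enum class.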
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/package-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/package-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,7 +25,7 @@
/**
* Doclets provide the user-selectable backends for processing the
- * documentation comnments in Java source code.
+ * documentation comments in Java source code.
*
* <p>Doclets are implementations of the {@link jdk.javadoc.doclet Doclet API}.</p>
*
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/builders/MemberSummaryBuilder.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/builders/MemberSummaryBuilder.java Fri Oct 11 12:08:01 2019 +0530
@@ -320,7 +320,7 @@
//necessary.
DocFinder.Output inheritedDoc =
DocFinder.search(configuration,
- new DocFinder.Input(utils, (ExecutableElement) member));
+ new DocFinder.Input(utils, member));
if (inheritedDoc.holder != null
&& !utils.getFirstSentenceTrees(inheritedDoc.holder).isEmpty()) {
// let the comment helper know of the overridden element
@@ -473,7 +473,7 @@
private void addSummaryFootNote(TypeElement inheritedClass, SortedSet<Element> inheritedMembers,
Content linksTree, MemberSummaryWriter writer) {
for (Element member : inheritedMembers) {
- TypeElement t = (utils.isPackagePrivate(inheritedClass) && !utils.isLinkable(inheritedClass))
+ TypeElement t = utils.isUndocumentedEnclosure(inheritedClass)
? typeElement : inheritedClass;
writer.addInheritedMemberSummary(t, member, inheritedMembers.first() == member,
inheritedMembers.last() == member, linksTree);
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java Fri Oct 11 12:08:01 2019 +0530
@@ -529,6 +529,10 @@
return true;
}
+ public boolean isUndocumentedEnclosure(TypeElement enclosingTypeElement) {
+ return isPackagePrivate(enclosingTypeElement) && !isLinkable(enclosingTypeElement);
+ }
+
public boolean isError(TypeElement te) {
if (isEnum(te) || isInterface(te) || isAnnotationType(te)) {
return false;
@@ -1064,8 +1068,7 @@
// Allow for the behavior that members of undocumented supertypes
// may be included in documented types
- TypeElement enclElem = getEnclosingTypeElement(elem);
- if (typeElem != enclElem && isSubclassOf(typeElem, enclElem)) {
+ if (isUndocumentedEnclosure(getEnclosingTypeElement(elem))) {
return true;
}
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/VisibleMemberTable.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/VisibleMemberTable.java Fri Oct 11 12:08:01 2019 +0530
@@ -209,7 +209,7 @@
public List<? extends Element> getVisibleMembers(Kind kind) {
Predicate<Element> declaredAndLeafMembers = e -> {
TypeElement encl = utils.getEnclosingTypeElement(e);
- return encl == te || isUndocumentedEnclosure(encl);
+ return encl == te || utils.isUndocumentedEnclosure(encl);
};
return getVisibleMembers(kind, declaredAndLeafMembers);
}
@@ -238,7 +238,8 @@
ensureInitialized();
OverridingMethodInfo found = overriddenMethodTable.get(e);
- if (found != null && (found.simpleOverride || isUndocumentedEnclosure(utils.getEnclosingTypeElement(e)))) {
+ if (found != null
+ && (found.simpleOverride || utils.isUndocumentedEnclosure(utils.getEnclosingTypeElement(e)))) {
return found.overrider;
}
return null;
@@ -347,10 +348,6 @@
return pm == null ? null : pm.setter;
}
- boolean isUndocumentedEnclosure(TypeElement encl) {
- return utils.isPackagePrivate(encl) && !utils.isLinkable(encl);
- }
-
private void computeParents() {
for (TypeMirror intfType : te.getInterfaces()) {
TypeElement intfc = utils.asTypeElement(intfType);
@@ -388,7 +385,7 @@
private void computeLeafMembers(LocalMemberTable lmt, Kind kind) {
List<Element> list = new ArrayList<>();
- if (isUndocumentedEnclosure(te)) {
+ if (utils.isUndocumentedEnclosure(te)) {
list.addAll(lmt.getOrderedMembers(kind));
}
parents.forEach(pvmt -> {
@@ -617,7 +614,7 @@
// Disallow package-private super methods to leak in
TypeElement encl = utils.getEnclosingTypeElement(inheritedMethod);
- if (isUndocumentedEnclosure(encl)) {
+ if (utils.isUndocumentedEnclosure(encl)) {
overriddenMethodTable.computeIfAbsent(lMethod,
l -> new OverridingMethodInfo(inheritedMethod, false));
return false;
--- a/src/jdk.jcmd/share/classes/sun/tools/common/ProcessArgumentMatcher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.jcmd/share/classes/sun/tools/common/ProcessArgumentMatcher.java Fri Oct 11 12:08:01 2019 +0530
@@ -140,18 +140,14 @@
return vids;
}
- public Collection<VirtualMachineDescriptor> getVirtualMachineDescriptors(Class<?> excludeClass) {
+ public Collection<VirtualMachineDescriptor> getVirtualMachineDescriptors() {
if (singlePid != null) {
return getSingleVMD(singlePid);
} else {
- return getVMDs(excludeClass, matchClass);
+ return getVMDs(null, matchClass);
}
}
- public Collection<VirtualMachineDescriptor> getVirtualMachineDescriptors() {
- return this.getVirtualMachineDescriptors(null);
- }
-
public Collection<String> getVirtualMachinePids(Class<?> excludeClass) {
if (singlePid != null) {
// There is a bug in AttachProvider, when VM is debuggee-suspended it's not listed by the AttachProvider.
@@ -162,7 +158,4 @@
}
}
- public Collection<String> getVirtualMachinePids() {
- return this.getVirtualMachinePids(null);
- }
}
--- a/src/jdk.jdwp.agent/share/native/libdt_socket/socketTransport.c Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.jdwp.agent/share/native/libdt_socket/socketTransport.c Fri Oct 11 12:08:01 2019 +0530
@@ -511,7 +511,7 @@
if (buffer == NULL) {
RETURN_ERROR(JDWPTRANSPORT_ERROR_OUT_OF_MEMORY, "out of memory");
}
- strncpy(buffer, allowed_peers, len);
+ memcpy(buffer, allowed_peers, len);
buffer[len] = '\0';
jdwpTransportError err = parseAllowedPeersInternal(buffer);
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java Fri Oct 11 12:08:01 2019 +0530
@@ -366,10 +366,16 @@
public void setDestination(WriteableUserPath userSuppliedPath) throws IOException {
synchronized (recorder) {
+ checkSetDestination(userSuppliedPath);
+ this.destination = userSuppliedPath;
+ }
+ }
+
+ public void checkSetDestination(WriteableUserPath userSuppliedPath) throws IOException {
+ synchronized (recorder) {
if (Utils.isState(getState(), RecordingState.STOPPED, RecordingState.CLOSED)) {
throw new IllegalStateException("Destination can't be set on a recording that has been stopped/closed");
}
- this.destination = userSuppliedPath;
}
}
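The change above splits validation out of setDestination() so a caller can verify a destination path without touching the recording's state. A minimal caller-side sketch of the resulting validate-then-set pattern (the helper method is hypothetical; it assumes a PlatformRecording and a WriteableUserPath are in scope):

    // Sketch only: checkSetDestination() throws IOException or
    // IllegalStateException without modifying the recording, so a caller can
    // pre-validate before committing any other changes.
    void applyDestination(PlatformRecording recording, WriteableUserPath path)
            throws IOException {
        recording.checkSetDestination(path); // no side effects on failure
        recording.setDestination(path);      // re-validates, then assigns
    }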
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/management/ManagementSupport.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/management/ManagementSupport.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,8 @@
package jdk.jfr.internal.management;
+import java.io.IOException;
+import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
@@ -98,4 +100,12 @@
WriteableUserPath wup = pr.getDestination();
return wup == null ? null : wup.getOriginalText();
}
+
+ public static void checkSetDestination(Recording recording, String destination) throws IOException{
+ PlatformRecording pr = PrivateAccess.getInstance().getPlatformRecording(recording);
+ if(destination != null){
+ WriteableUserPath wup = new WriteableUserPath(Paths.get(destination));
+ pr.checkSetDestination(wup);
+ }
+ }
}
--- a/src/jdk.jshell/share/classes/jdk/jshell/ExpressionToTypeInfo.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.jshell/share/classes/jdk/jshell/ExpressionToTypeInfo.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,7 @@
private static class Result extends Error {
static final long serialVersionUID = -5942088234594905629L;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
final TreePath expressionPath;
Result(TreePath path) {
--- a/src/jdk.management.jfr/share/classes/jdk/management/jfr/FlightRecorderMXBeanImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.management.jfr/share/classes/jdk/management/jfr/FlightRecorderMXBeanImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -28,6 +28,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
+import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.AccessControlContext;
import java.security.AccessController;
@@ -105,7 +106,8 @@
private static final String OPTION_DISK = "disk";
private static final String OPTION_DUMP_ON_EXIT = "dumpOnExit";
private static final String OPTION_DURATION = "duration";
- private static final List<String> OPTIONS = Arrays.asList(new String[] { OPTION_DUMP_ON_EXIT, OPTION_DURATION, OPTION_NAME, OPTION_MAX_AGE, OPTION_MAX_SIZE, OPTION_DISK, });
+ private static final String OPTION_DESTINATION = "destination";
+ private static final List<String> OPTIONS = Arrays.asList(new String[] { OPTION_DUMP_ON_EXIT, OPTION_DURATION, OPTION_NAME, OPTION_MAX_AGE, OPTION_MAX_SIZE, OPTION_DISK, OPTION_DESTINATION, });
private final StreamManager streamHandler = new StreamManager();
private final Map<Long, Object> changes = new ConcurrentHashMap<>();
private final AtomicLong sequenceNumber = new AtomicLong();
@@ -283,6 +285,7 @@
validateOption(ops, OPTION_MAX_AGE, MBeanUtils::duration);
validateOption(ops, OPTION_MAX_SIZE, MBeanUtils::size);
validateOption(ops, OPTION_DURATION, MBeanUtils::duration);
+ validateOption(ops, OPTION_DESTINATION, x -> MBeanUtils.destination(r, x));
// All OK, now set them atomically
setOption(ops, OPTION_DUMP_ON_EXIT, "false", MBeanUtils::booleanValue, x -> r.setDumpOnExit(x));
@@ -291,6 +294,7 @@
setOption(ops, OPTION_MAX_AGE, null, MBeanUtils::duration, x -> r.setMaxAge(x));
setOption(ops, OPTION_MAX_SIZE, "0", MBeanUtils::size, x -> r.setMaxSize(x));
setOption(ops, OPTION_DURATION, null, MBeanUtils::duration, x -> r.setDuration(x));
+ setOption(ops, OPTION_DESTINATION, null, x -> MBeanUtils.destination(r, x), x -> setOptionDestination(r, x));
}
@Override
@@ -305,6 +309,7 @@
Long maxSize = r.getMaxSize();
options.put(OPTION_MAX_SIZE, String.valueOf(maxSize == null ? "0" : maxSize.toString()));
options.put(OPTION_DURATION, ManagementSupport.formatTimespan(r.getDuration(), " "));
+ options.put(OPTION_DESTINATION, ManagementSupport.getDestinationOriginalText(r));
return options;
}
@@ -349,6 +354,20 @@
}
}
+ private static void setOptionDestination(Recording recording, String destination){
+ try {
+ Path pathDestination = null;
+ if(destination != null){
+ pathDestination = Paths.get(destination);
+ }
+ recording.setDestination(pathDestination);
+ } catch (IOException e) {
+ IllegalArgumentException iae = new IllegalArgumentException("Not a valid destination " + destination);
+ iae.addSuppressed(e);
+ throw iae;
+ }
+ }
+
private static <T, U> void validateOption(Map<String, String> options, String name, Function<String, U> validator) {
try {
String v = options.get(name);
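With the destination option wired into validation, setRecordingOptions() and getRecordingOptions() above, a JMX client can configure a dump path like any other recording option. A hedged usage sketch against the public FlightRecorderMXBean API (the file path is illustrative):

    import java.lang.management.ManagementFactory;
    import java.util.Map;
    import jdk.management.jfr.FlightRecorderMXBean;

    public class DestinationOptionExample {
        public static void main(String[] args) {
            FlightRecorderMXBean bean =
                ManagementFactory.getPlatformMXBean(FlightRecorderMXBean.class);
            long id = bean.newRecording();
            // An invalid path is rejected up front via MBeanUtils.destination(),
            // so no option is applied if validation fails.
            bean.setRecordingOptions(id, Map.of("destination", "/tmp/dump.jfr"));
            bean.startRecording(id);
        }
    }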
--- a/src/jdk.management.jfr/share/classes/jdk/management/jfr/MBeanUtils.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.management.jfr/share/classes/jdk/management/jfr/MBeanUtils.java Fri Oct 11 12:08:01 2019 +0530
@@ -24,6 +24,7 @@
*/
package jdk.management.jfr;
+import java.io.IOException;
import java.lang.management.ManagementPermission;
import java.security.Permission;
import java.time.DateTimeException;
@@ -37,6 +38,7 @@
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
+import jdk.jfr.Recording;
import jdk.jfr.internal.management.ManagementSupport;
final class MBeanUtils {
@@ -126,5 +128,16 @@
}
return size;
}
+
+ public static String destination(Recording recording, String destination) throws IllegalArgumentException{
+ try {
+ ManagementSupport.checkSetDestination(recording, destination);
+ return destination;
+ }catch(IOException e){
+ IllegalArgumentException iae = new IllegalArgumentException("Not a valid destination " + destination);
+ iae.addSuppressed(e);
+ throw iae;
+ }
+ }
}
--- a/src/jdk.management/share/classes/com/sun/management/ThreadMXBean.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.management/share/classes/com/sun/management/ThreadMXBean.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
* @throws NullPointerException if {@code ids} is {@code null}
* @throws IllegalArgumentException if any element in the input array
* {@code ids} is {@code <=} {@code 0}.
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine implementation does not support CPU time
* measurement.
*
@@ -95,7 +95,7 @@
* @throws NullPointerException if {@code ids} is {@code null}
* @throws IllegalArgumentException if any element in the input array
* {@code ids} is {@code <=} {@code 0}.
- * @throws java.lang.UnsupportedOperationException if the Java
+ * @throws UnsupportedOperationException if the Java
* virtual machine implementation does not support CPU time
* measurement.
*
@@ -109,13 +109,50 @@
/**
* Returns an approximation of the total amount of memory, in bytes,
- * allocated in heap memory for the thread of the specified ID.
+ * allocated in heap memory for the current thread.
+ * The returned value is an approximation because some Java virtual machine
+ * implementations may use object allocation mechanisms that result in a
+ * delay between the time an object is allocated and the time its size is
+ * recorded.
+ *
+ * <p>
+ * This is a convenience method for local management use and is
+ * equivalent to calling:
+ * <blockquote><pre>
+ * {@link #getThreadAllocatedBytes getThreadAllocatedBytes}(Thread.currentThread().getId());
+ * </pre></blockquote>
+ *
+ * @implSpec The default implementation throws
+ * {@code UnsupportedOperationException}.
+ *
+ * @return an approximation of the total memory allocated, in bytes, in
+ * heap memory for the current thread
+ * if thread memory allocation measurement is enabled;
+ * {@code -1} otherwise.
+ *
+ * @throws UnsupportedOperationException if the Java virtual
+ * machine implementation does not support thread memory allocation
+ * measurement.
+ *
+ * @see #isThreadAllocatedMemorySupported
+ * @see #isThreadAllocatedMemoryEnabled
+ * @see #setThreadAllocatedMemoryEnabled
+ *
+ * @since 14
+ */
+ public default long getCurrentThreadAllocatedBytes() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Returns an approximation of the total amount of memory, in bytes,
+ * allocated in heap memory for the thread with the specified ID.
* The returned value is an approximation because some Java virtual machine
* implementations may use object allocation mechanisms that result in a
* delay between the time an object is allocated and the time its size is
* recorded.
* <p>
- * If the thread of the specified ID is not alive or does not exist,
+ * If the thread with the specified ID is not alive or does not exist,
* this method returns {@code -1}. If thread memory allocation measurement
* is disabled, this method returns {@code -1}.
* A thread is alive if it has been started and has not yet died.
@@ -127,13 +164,13 @@
*
* @param id the thread ID of a thread
* @return an approximation of the total memory allocated, in bytes, in
- * heap memory for a thread of the specified ID
- * if the thread of the specified ID exists, the thread is alive,
+ * heap memory for the thread with the specified ID
+ * if the thread with the specified ID exists, the thread is alive,
* and thread memory allocation measurement is enabled;
* {@code -1} otherwise.
*
* @throws IllegalArgumentException if {@code id} {@code <=} {@code 0}.
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine implementation does not support thread memory allocation
* measurement.
*
@@ -165,7 +202,7 @@
* @throws NullPointerException if {@code ids} is {@code null}
* @throws IllegalArgumentException if any element in the input array
* {@code ids} is {@code <=} {@code 0}.
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine implementation does not support thread memory allocation
* measurement.
*
@@ -194,7 +231,7 @@
* @return {@code true} if thread memory allocation measurement is enabled;
* {@code false} otherwise.
*
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine does not support thread memory allocation measurement.
*
* @see #isThreadAllocatedMemorySupported
@@ -208,10 +245,10 @@
* @param enable {@code true} to enable;
* {@code false} to disable.
*
- * @throws java.lang.UnsupportedOperationException if the Java virtual
+ * @throws UnsupportedOperationException if the Java virtual
* machine does not support thread memory allocation measurement.
*
- * @throws java.lang.SecurityException if a security manager
+ * @throws SecurityException if a security manager
* exists and the caller does not have
* ManagementPermission("control").
*
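The new getCurrentThreadAllocatedBytes() default method is a convenience for the calling thread. A minimal usage sketch (it assumes, as on HotSpot, that the platform ThreadMXBean also implements the com.sun.management extension, so the cast succeeds):

    import java.lang.management.ManagementFactory;

    public class CurrentThreadAllocationExample {
        public static void main(String[] args) {
            com.sun.management.ThreadMXBean bean =
                (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();
            if (bean.isThreadAllocatedMemorySupported()) {
                bean.setThreadAllocatedMemoryEnabled(true);
                byte[] scratch = new byte[1024 * 1024]; // allocate something measurable
                // Equivalent to getThreadAllocatedBytes(Thread.currentThread().getId())
                System.out.println(scratch.length + " bytes requested, "
                    + bean.getCurrentThreadAllocatedBytes() + " bytes recorded so far");
            }
        }
    }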
--- a/src/jdk.management/share/classes/com/sun/management/internal/HotSpotThreadImpl.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.management/share/classes/com/sun/management/internal/HotSpotThreadImpl.java Fri Oct 11 12:08:01 2019 +0530
@@ -58,6 +58,11 @@
}
@Override
+ public long getCurrentThreadAllocatedBytes() {
+ return super.getCurrentThreadAllocatedBytes();
+ }
+
+ @Override
public long getThreadAllocatedBytes(long id) {
return super.getThreadAllocatedBytes(id);
}
--- a/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/Property.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/Property.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,6 +103,7 @@
public static final int IS_ACCESSOR_PROPERTY = 1 << 12;
/** Property key. */
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Object key;
/** Property flags. */
--- a/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/RecompilableScriptFunctionData.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/RecompilableScriptFunctionData.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -118,11 +118,13 @@
* Opaque object representing parser state at the end of the function. Used when reparsing outer function
* to help with skipping parsing inner functions.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Object endParserState;
/** Code installer used for all further recompilation/specialization of this ScriptFunction */
private transient CodeInstaller installer;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Map<Integer, RecompilableScriptFunctionData> nestedFunctions;
/** Id to parent function if one exists */
@@ -135,8 +137,10 @@
private transient DebugLogger log;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Map<String, Integer> externalScopeDepths;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Set<String> internalSymbols;
private static final int GET_SET_PREFIX_LENGTH = "*et ".length();
--- a/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/SharedPropertyMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/SharedPropertyMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
*/
public final class SharedPropertyMap extends PropertyMap {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private SwitchPoint switchPoint;
private static final long serialVersionUID = 2166297719721778876L;
--- a/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/StoredScript.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/StoredScript.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,12 +42,15 @@
private final String mainClassName;
/** Map of class names to class bytes. */
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Map<String, byte[]> classBytes;
/** Constants array. */
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Object[] constants;
/** Function initializers */
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Map<Integer, FunctionInitializer> initializers;
private static final long serialVersionUID = 2958227232195298340L;
--- a/src/jdk.security.auth/share/classes/com/sun/security/auth/module/NTSystem.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.security.auth/share/classes/com/sun/security/auth/module/NTSystem.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,12 +35,14 @@
private native void getCurrent(boolean debug);
private native long getImpersonationToken0();
+ // Warning: the next 6 fields are used by nt.c
private String userName;
private String domain;
private String domainSID;
private String userSID;
private String[] groupIDs;
private String primaryGroupID;
+
private long impersonationToken;
/**
--- a/src/jdk.security.auth/share/classes/com/sun/security/auth/module/UnixSystem.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.security.auth/share/classes/com/sun/security/auth/module/UnixSystem.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
private native void getUnixInfo();
+ // Warning: the following 4 fields are used by Unix.c
protected String username;
protected long uid;
protected long gid;
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystem.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystem.java Fri Oct 11 12:08:01 2019 +0530
@@ -60,18 +60,20 @@
JarFileSystem(ZipFileSystemProvider provider, Path zfpath, Map<String,?> env) throws IOException {
super(provider, zfpath, env);
- if (isMultiReleaseJar()) {
+ Object o = getRuntimeVersion(env);
+ if (isMultiReleaseJar() && (o != null)) {
int version;
- Object o = env.get("multi-release");
if (o instanceof String) {
String s = (String)o;
if (s.equals("runtime")) {
version = Runtime.version().feature();
+ } else if (s.matches("^[1-9][0-9]*$")) {
+ version = Version.parse(s).feature();
} else {
- version = Integer.parseInt(s);
+ throw new IllegalArgumentException("Invalid runtime version");
}
} else if (o instanceof Integer) {
- version = (Integer)o;
+ version = Version.parse(((Integer)o).toString()).feature();
} else if (o instanceof Version) {
version = ((Version)o).feature();
} else {
@@ -83,6 +85,23 @@
}
}
+ /**
+ * Utility method to get the release version for a multi-release JAR. It
+ * first checks the documented property {@code releaseVersion} and if not
+ * found checks the original property {@code multi-release}
+ * @param env ZIP FS map
+ * @return release version or null if it is not specified
+ */
+ private Object getRuntimeVersion(Map<String, ?> env) {
+ Object o = null;
+ if (env.containsKey(ZipFileSystemProvider.PROPERTY_RELEASE_VERSION)) {
+ o = env.get(ZipFileSystemProvider.PROPERTY_RELEASE_VERSION);
+ } else {
+ o = env.get(ZipFileSystemProvider.PROPERTY_MULTI_RELEASE);
+ }
+ return o;
+ }
+
private boolean isMultiReleaseJar() throws IOException {
try (InputStream is = newInputStream(getBytes("/META-INF/MANIFEST.MF"))) {
String multiRelease = new Manifest(is).getMainAttributes()
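getRuntimeVersion() above prefers the documented releaseVersion property and falls back to the legacy multi-release key. A hedged sketch of opening a multi-release JAR at a specific version (the JAR path and entry name are illustrative):

    import java.nio.file.FileSystem;
    import java.nio.file.FileSystems;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Map;

    public class ReleaseVersionExample {
        public static void main(String[] args) throws Exception {
            Path jar = Path.of("mr.jar"); // hypothetical multi-release JAR
            // "releaseVersion" is consulted first; "multi-release" still works.
            try (FileSystem fs =
                    FileSystems.newFileSystem(jar, Map.of("releaseVersion", "11"))) {
                Path entry = fs.getPath("/pkg/Foo.class"); // hypothetical entry
                System.out.println(entry + " exists: " + Files.exists(entry));
            }
        }
    }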
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Fri Oct 11 12:08:01 2019 +0530
@@ -81,13 +81,19 @@
(PrivilegedAction<Boolean>)()->System.getProperty("os.name")
.startsWith("Windows"));
private static final byte[] ROOTPATH = new byte[] { '/' };
- private static final String OPT_POSIX = "enablePosixFileAttributes";
- private static final String OPT_DEFAULT_OWNER = "defaultOwner";
- private static final String OPT_DEFAULT_GROUP = "defaultGroup";
- private static final String OPT_DEFAULT_PERMISSIONS = "defaultPermissions";
+ private static final String PROPERTY_POSIX = "enablePosixFileAttributes";
+ private static final String PROPERTY_DEFAULT_OWNER = "defaultOwner";
+ private static final String PROPERTY_DEFAULT_GROUP = "defaultGroup";
+ private static final String PROPERTY_DEFAULT_PERMISSIONS = "defaultPermissions";
private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
PosixFilePermissions.fromString("rwxrwxrwx");
+ // Property used to specify the compression mode to use
+ private static final String PROPERTY_COMPRESSION_METHOD = "compressionMethod";
+ // Value specified for compressionMethod property to compress Zip entries
+ private static final String COMPRESSION_METHOD_DEFLATED = "DEFLATED";
+ // Value specified for compressionMethod property to not compress Zip entries
+ private static final String COMPRESSION_METHOD_STORED = "STORED";
private final ZipFileSystemProvider provider;
private final Path zfpath;
@@ -124,8 +130,8 @@
this.noExtt = "false".equals(env.get("zipinfo-time"));
this.useTempFile = isTrue(env, "useTempFile");
this.forceEnd64 = isTrue(env, "forceZIP64End");
- this.defaultCompressionMethod = isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
- this.supportPosix = isTrue(env, OPT_POSIX);
+ this.defaultCompressionMethod = getDefaultCompressionMethod(env);
+ this.supportPosix = isTrue(env, PROPERTY_POSIX);
this.defaultOwner = initOwner(zfpath, env);
this.defaultGroup = initGroup(zfpath, env);
this.defaultPermissions = initPermissions(env);
@@ -138,7 +144,7 @@
new END().write(os, 0, forceEnd64);
}
} else {
- throw new FileSystemNotFoundException(zfpath.toString());
+ throw new NoSuchFileException(zfpath.toString());
}
}
// sm and existence check
@@ -163,6 +169,50 @@
this.zfpath = zfpath;
}
+ /**
+     * Return the compression method to use (STORED or DEFLATED). If the
+     * property {@code compressionMethod} is set, use its value to determine
+     * the compression method to use. If the property is not set, then the
+     * default compression is DEFLATED unless the property {@code noCompression}
+     * is set, which is supported for backwards compatibility.
+ * @param env Zip FS map of properties
+     * @return the compression method to use
+ */
+ private int getDefaultCompressionMethod(Map<String, ?> env) {
+ int result =
+ isTrue(env, "noCompression") ? METHOD_STORED : METHOD_DEFLATED;
+ if (env.containsKey(PROPERTY_COMPRESSION_METHOD)) {
+ Object compressionMethod = env.get(PROPERTY_COMPRESSION_METHOD);
+ if (compressionMethod != null) {
+ if (compressionMethod instanceof String) {
+ switch (((String) compressionMethod).toUpperCase()) {
+ case COMPRESSION_METHOD_STORED:
+ result = METHOD_STORED;
+ break;
+ case COMPRESSION_METHOD_DEFLATED:
+ result = METHOD_DEFLATED;
+ break;
+ default:
+ throw new IllegalArgumentException(String.format(
+ "The value for the %s property must be %s or %s",
+ PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
+ COMPRESSION_METHOD_DEFLATED));
+ }
+ } else {
+ throw new IllegalArgumentException(String.format(
+ "The Object type for the %s property must be a String",
+ PROPERTY_COMPRESSION_METHOD));
+ }
+ } else {
+ throw new IllegalArgumentException(String.format(
+ "The value for the %s property must be %s or %s",
+ PROPERTY_COMPRESSION_METHOD, COMPRESSION_METHOD_STORED,
+ COMPRESSION_METHOD_DEFLATED));
+ }
+ }
+ return result;
+ }
+
// returns true if there is a name=true/"true" setting in env
private static boolean isTrue(Map<String, ?> env, String name) {
return "true".equals(env.get(name)) || TRUE.equals(env.get(name));
@@ -173,7 +223,7 @@
// be determined, we try to go with system property "user.name". If that's not
// accessible, we return "<zipfs_default>".
private UserPrincipal initOwner(Path zfpath, Map<String, ?> env) throws IOException {
- Object o = env.get(OPT_DEFAULT_OWNER);
+ Object o = env.get(PROPERTY_DEFAULT_OWNER);
if (o == null) {
try {
PrivilegedExceptionAction<UserPrincipal> pa = ()->Files.getOwner(zfpath);
@@ -193,7 +243,7 @@
if (o instanceof String) {
if (((String)o).isEmpty()) {
throw new IllegalArgumentException("Value for property " +
- OPT_DEFAULT_OWNER + " must not be empty.");
+ PROPERTY_DEFAULT_OWNER + " must not be empty.");
}
return ()->(String)o;
}
@@ -201,7 +251,7 @@
return (UserPrincipal)o;
}
throw new IllegalArgumentException("Value for property " +
- OPT_DEFAULT_OWNER + " must be of type " + String.class +
+ PROPERTY_DEFAULT_OWNER + " must be of type " + String.class +
" or " + UserPrincipal.class);
}
@@ -210,7 +260,7 @@
// If this is not possible/unsupported, we will return a group principal going by
// the same name as the default owner.
private GroupPrincipal initGroup(Path zfpath, Map<String, ?> env) throws IOException {
- Object o = env.get(OPT_DEFAULT_GROUP);
+ Object o = env.get(PROPERTY_DEFAULT_GROUP);
if (o == null) {
try {
PosixFileAttributeView zfpv = Files.getFileAttributeView(zfpath, PosixFileAttributeView.class);
@@ -232,7 +282,7 @@
if (o instanceof String) {
if (((String)o).isEmpty()) {
throw new IllegalArgumentException("Value for property " +
- OPT_DEFAULT_GROUP + " must not be empty.");
+ PROPERTY_DEFAULT_GROUP + " must not be empty.");
}
return ()->(String)o;
}
@@ -240,14 +290,14 @@
return (GroupPrincipal)o;
}
throw new IllegalArgumentException("Value for property " +
- OPT_DEFAULT_GROUP + " must be of type " + String.class +
+ PROPERTY_DEFAULT_GROUP + " must be of type " + String.class +
" or " + GroupPrincipal.class);
}
// Initialize the default permissions for files inside the zip archive.
// If not specified in env, it will return 777.
private Set<PosixFilePermission> initPermissions(Map<String, ?> env) {
- Object o = env.get(OPT_DEFAULT_PERMISSIONS);
+ Object o = env.get(PROPERTY_DEFAULT_PERMISSIONS);
if (o == null) {
return DEFAULT_PERMISSIONS;
}
@@ -256,7 +306,7 @@
}
if (!(o instanceof Set)) {
throw new IllegalArgumentException("Value for property " +
- OPT_DEFAULT_PERMISSIONS + " must be of type " + String.class +
+ PROPERTY_DEFAULT_PERMISSIONS + " must be of type " + String.class +
" or " + Set.class);
}
Set<PosixFilePermission> perms = new HashSet<>();
@@ -264,7 +314,7 @@
if (o2 instanceof PosixFilePermission) {
perms.add((PosixFilePermission)o2);
} else {
- throw new IllegalArgumentException(OPT_DEFAULT_PERMISSIONS +
+ throw new IllegalArgumentException(PROPERTY_DEFAULT_PERMISSIONS +
" must only contain objects of type " + PosixFilePermission.class);
}
}
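getDefaultCompressionMethod() above accepts only the strings "STORED" and "DEFLATED" (case-insensitive) and rejects any other value, or a non-String value, with IllegalArgumentException. A hedged sketch of creating an archive whose entries are written uncompressed (the archive location is illustrative):

    import java.net.URI;
    import java.nio.file.FileSystem;
    import java.nio.file.FileSystems;
    import java.nio.file.Files;
    import java.util.Map;

    public class StoredCompressionExample {
        public static void main(String[] args) throws Exception {
            URI uri = URI.create("jar:file:/tmp/stored.zip"); // hypothetical archive
            Map<String, String> env = Map.of(
                "create", "true",               // create the archive if it does not exist
                "compressionMethod", "STORED"); // write entries without compression
            try (FileSystem fs = FileSystems.newFileSystem(uri, env)) {
                Files.writeString(fs.getPath("/hello.txt"), "hello");
            }
        }
    }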
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystemProvider.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystemProvider.java Fri Oct 11 12:08:01 2019 +0530
@@ -53,6 +53,11 @@
*/
public class ZipFileSystemProvider extends FileSystemProvider {
+ // Property used to specify the entry version to use for a multi-release JAR
+ static final String PROPERTY_RELEASE_VERSION = "releaseVersion";
+ // Original property used to specify the entry version to use for a
+ // multi-release JAR which is kept for backwards compatibility.
+ static final String PROPERTY_MULTI_RELEASE = "multi-release";
private final Map<Path, ZipFileSystem> filesystems = new HashMap<>();
public ZipFileSystemProvider() {}
@@ -104,20 +109,7 @@
if (filesystems.containsKey(realPath))
throw new FileSystemAlreadyExistsException();
}
- ZipFileSystem zipfs;
- try {
- if (env.containsKey("multi-release")) {
- zipfs = new JarFileSystem(this, path, env);
- } else {
- zipfs = new ZipFileSystem(this, path, env);
- }
- } catch (ZipException ze) {
- String pname = path.toString();
- if (pname.endsWith(".zip") || pname.endsWith(".jar"))
- throw ze;
- // assume NOT a zip/jar file
- throw new UnsupportedOperationException();
- }
+ ZipFileSystem zipfs = getZipFileSystem(path, env);
if (realPath == null) { // newly created
realPath = path.toRealPath();
}
@@ -131,20 +123,25 @@
throws IOException
{
ensureFile(path);
+ return getZipFileSystem(path, env);
+ }
+
+ private ZipFileSystem getZipFileSystem(Path path, Map<String, ?> env) throws IOException {
+ ZipFileSystem zipfs;
try {
- ZipFileSystem zipfs;
- if (env.containsKey("multi-release")) {
+ if (env.containsKey(PROPERTY_RELEASE_VERSION) ||
+ env.containsKey(PROPERTY_MULTI_RELEASE)) {
zipfs = new JarFileSystem(this, path, env);
} else {
zipfs = new ZipFileSystem(this, path, env);
}
- return zipfs;
} catch (ZipException ze) {
String pname = path.toString();
if (pname.endsWith(".zip") || pname.endsWith(".jar"))
throw ze;
throw new UnsupportedOperationException();
}
+ return zipfs;
}
@Override
--- a/src/jdk.zipfs/share/classes/module-info.java Wed Oct 09 17:06:06 2019 -0700
+++ b/src/jdk.zipfs/share/classes/module-info.java Fri Oct 11 12:08:01 2019 +0530
@@ -147,7 +147,7 @@
* <tbody>
* <tr>
* <th scope="row">create</th>
- * <td>java.lang.String</td>
+ * <td>{@link java.lang.String} or {@link java.lang.Boolean}</td>
* <td>false</td>
* <td>
* If the value is {@code true}, the Zip file system provider
@@ -156,7 +156,7 @@
* </tr>
* <tr>
* <th scope="row">encoding</th>
- * <td>java.lang.String</td>
+ * <td>{@link java.lang.String}</td>
* <td>UTF-8</td>
* <td>
* The value indicates the encoding scheme for the
@@ -164,8 +164,8 @@
* </td>
* </tr>
* <tr>
- * <td scope="row">enablePosixFileAttributes</td>
- * <td>java.lang.String</td>
+ * <th scope="row">enablePosixFileAttributes</th>
+ * <td>{@link java.lang.String} or {@link java.lang.Boolean}</td>
* <td>false</td>
* <td>
* If the value is {@code true}, the Zip file system will support
@@ -173,8 +173,9 @@
* </td>
* </tr>
* <tr>
- * <td scope="row">defaultOwner</td>
- * <td>{@link java.nio.file.attribute.UserPrincipal UserPrincipal}<br> or java.lang.String</td>
+ * <th scope="row">defaultOwner</th>
+ * <td>{@link java.nio.file.attribute.UserPrincipal UserPrincipal}<br> or
+ * {@link java.lang.String}</td>
* <td>null/unset</td>
* <td>
* Override the default owner for entries in the Zip file system.<br>
@@ -182,8 +183,9 @@
* </td>
* </tr>
* <tr>
- * <td scope="row">defaultGroup</td>
- * <td>{@link java.nio.file.attribute.GroupPrincipal GroupPrincipal}<br> or java.lang.String</td>
+ * <th scope="row">defaultGroup</th>
+ * <td>{@link java.nio.file.attribute.GroupPrincipal GroupPrincipal}<br> or
+ * {@link java.lang.String}</td>
* <td>null/unset</td>
* <td>
*     Override the default group for entries in the Zip file system.<br>
@@ -191,9 +193,9 @@
* </td>
* </tr>
* <tr>
- * <td scope="row">defaultPermissions</td>
+ * <th scope="row">defaultPermissions</th>
* <td>{@link java.util.Set Set}<{@link java.nio.file.attribute.PosixFilePermission PosixFilePermission}><br>
- * or java.lang.String</td>
+ * or {@link java.lang.String}</td>
* <td>null/unset</td>
* <td>
* Override the default Set of permissions for entries in the Zip file system.<br>
@@ -201,7 +203,66 @@
* a String that is parsed by {@link java.nio.file.attribute.PosixFilePermissions#fromString PosixFilePermissions::fromString}
* </td>
* </tr>
- * </tbody>
+ * <tr>
+ * <th scope="row">compressionMethod</th>
+ * <td>{@link java.lang.String}</td>
+ * <td>"DEFLATED"</td>
+ * <td>
+ * The value representing the compression method to use when writing entries
+ * to the Zip file system.
+ * <ul>
+ * <li>
+ * If the value is {@code "STORED"}, the Zip file system provider will
+ * not compress entries when writing to the Zip file system.
+ * </li>
+ * <li>
+ * If the value is {@code "DEFLATED"} or the property is not set,
+ * the Zip file system provider will use data compression when
+ * writing entries to the Zip file system.
+ * </li>
+ * <li>
+ * If the value is not {@code "STORED"} or {@code "DEFLATED"}, an
+ * {@code IllegalArgumentException} will be thrown when the Zip
+ * filesystem is created.
+ * </li>
+ * </ul>
+ * </td>
+ * </tr>
+ * <tr>
+ * <th scope="row">releaseVersion</th>
+ * <td>{@link java.lang.String} or {@link java.lang.Integer}</td>
+ * <td>null/unset</td>
+ * <td>
+ * A value representing the version entry to use when accessing a
+ *       <a href="{@docRoot}/../specs/jar/jar.html#multi-release-jar-files">
+ * multi-release JAR</a>. If the JAR is not a
+ *       <a href="{@docRoot}/../specs/jar/jar.html#multi-release-jar-files">
+ * multi-release JAR</a>, the value will be ignored and the JAR will be
+ * considered un-versioned.
+ * <p>
+ * The value must be either the string "runtime" or represent a valid
+ * {@linkplain Runtime.Version Java SE Platform version number},
+ * such as {@code 9} or {@code 14}, in order to determine the version entry.
+ *
+ * <ul>
+ * <li>
+ * If the value is {@code null} or the property is not set,
+ * then the JAR will be treated as an un-versioned JAR.
+ * </li>
+ * <li>
+ * If the value is {@code "runtime"}, the
+ * version entry will be determined by invoking
+ * {@linkplain Runtime.Version#feature() Runtime.Version.feature()}.
+ * </li>
+ * <li>
+ * If the value does not represent a valid
+ * {@linkplain Runtime.Version Java SE Platform version number},
+ * an {@code IllegalArgumentException} will be thrown.
+ * </li>
+ * </ul>
+ * </td>
+ * </tr>
+ * </tbody>
* </table>
*
* <h2>Examples:</h2>
@@ -223,7 +284,7 @@
* <pre>
* {@code
*
- * FileSystem zipfs = FileSystems.newFileSystem(Path.of("helloworld.jar"), null);
+ * FileSystem zipfs = FileSystems.newFileSystem(Path.of("helloworld.jar"));
* Path rootDir = zipfs.getPath("/");
* Files.walk(rootDir)
* .forEach(System.out::println);
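In the spirit of the Examples section above, a hedged variant that opens helloworld.jar with the newly documented releaseVersion property so that, if the JAR is multi-release, entries are resolved against the running JDK's feature version:

    import java.nio.file.FileSystem;
    import java.nio.file.FileSystems;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Map;

    public class RuntimeReleaseVersionExample {
        public static void main(String[] args) throws Exception {
            try (FileSystem zipfs = FileSystems.newFileSystem(
                    Path.of("helloworld.jar"), Map.of("releaseVersion", "runtime"))) {
                Path rootDir = zipfs.getPath("/");
                try (var paths = Files.walk(rootDir)) {
                    paths.forEach(System.out::println);
                }
            }
        }
    }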
--- a/test/hotspot/gtest/runtime/test_os_windows.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/gtest/runtime/test_os_windows.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,4 +85,595 @@
<< "Failed to allocate memory at requested location " << expected_location << " of size " << expected_allocation_size;
}
+// The types of path modifications we randomly apply to a path. They should not change the file designated by the path.
+enum ModsFilter {
+ Allow_None = 0, // No modifications
+  Allow_Sep_Mods = 1, // Replace '\\' with any sequence of '/' or '\\' of at least length 1.
+  Allow_Dot_Path = 2, // Add /. segments at random positions.
+ Allow_Dot_Dot_Path = 4, // Add /../<correct-dir> segments at random positions.
+ Allow_All = Allow_Sep_Mods | Allow_Dot_Path | Allow_Dot_Dot_Path
+};
+
+// The mode in which to run.
+enum Mode {
+  TEST, // Runs the test. This is the normal mode.
+  EXAMPLES, // Runs examples which document the behaviour of the Windows system calls.
+ BENCH // Runs a small benchmark which tries to show the costs of using the *W variants/_wfullpath.
+};
+
+// Parameters of the test.
+static ModsFilter mods_filter = Allow_All;
+static int mods_per_path = 50; // The number of variants of a path we try.
+static Mode mode = TEST;
+
+
+// Utility methods
+static void get_current_dir_w(wchar_t* path, size_t size) {
+ DWORD count = GetCurrentDirectoryW((DWORD) size, path);
+ EXPECT_GT((int) count, 0) << "Failed to get current directory: " << GetLastError();
+ EXPECT_LT((size_t) count, size) << "Buffer too small for current directory: " << size;
+}
+
+#define WITH_ABS_PATH(path) \
+ wchar_t abs_path[JVM_MAXPATHLEN]; \
+ wchar_t cwd[JVM_MAXPATHLEN]; \
+ get_current_dir_w(cwd, JVM_MAXPATHLEN); \
+ wsprintfW(abs_path, L"\\\\?\\%ls\\%ls", cwd, (path))
+
+static bool file_exists_w(const wchar_t* path) {
+ WIN32_FILE_ATTRIBUTE_DATA file_data;
+ return ::GetFileAttributesExW(path, GetFileExInfoStandard, &file_data);
+}
+
+static void create_rel_directory_w(const wchar_t* path) {
+ WITH_ABS_PATH(path);
+ EXPECT_FALSE(file_exists_w(abs_path)) << "Can't create directory: \"" << path << "\" already exists";
+ BOOL result = CreateDirectoryW(abs_path, NULL);
+ EXPECT_TRUE(result) << "Failed to create directory \"" << path << "\" " << GetLastError();
+}
+
+static void delete_empty_rel_directory_w(const wchar_t* path) {
+ WITH_ABS_PATH(path);
+  EXPECT_TRUE(file_exists_w(abs_path)) << "Can't delete directory: \"" << path << "\" does not exist";
+ BOOL result = RemoveDirectoryW(abs_path);
+ EXPECT_TRUE(result) << "Failed to delete directory \"" << path << "\": " << GetLastError();
+}
+
+static void create_rel_file_w(const wchar_t* path) {
+ WITH_ABS_PATH(path);
+ EXPECT_FALSE(file_exists_w(abs_path)) << "Can't create file: \"" << path << "\" already exists";
+ HANDLE h = CreateFileW(abs_path, 0, 0, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
+ EXPECT_NE(h, INVALID_HANDLE_VALUE) << "Failed to create file \"" << path << "\": " << GetLastError();
+ CloseHandle(h);
+}
+
+static void delete_rel_file_w(const wchar_t* path) {
+ WITH_ABS_PATH(path);
+  EXPECT_TRUE(file_exists_w(abs_path)) << "Can't delete file: \"" << path << "\" does not exist";
+ BOOL result = DeleteFileW(abs_path);
+ EXPECT_TRUE(result) << "Failed to delete file \"" << path << "\": " << GetLastError();
+}
+
+static bool convert_to_cstring(char* c_str, size_t size, wchar_t* w_str) {
+ size_t converted;
+ errno_t err = wcstombs_s(&converted, c_str, size, w_str, size - 1);
+ EXPECT_EQ(err, ERROR_SUCCESS) << "Could not convert \"" << w_str << "\" to c-string";
+
+ return err == ERROR_SUCCESS;
+}
+
+static wchar_t* my_wcscpy_s(wchar_t* dest, size_t size, wchar_t* start, const wchar_t* to_copy) {
+ size_t already_used = dest - start;
+ size_t len = wcslen(to_copy);
+
+ if (already_used + len < size) {
+ wcscpy_s(dest, size - already_used, to_copy);
+ }
+
+ return dest + wcslen(to_copy);
+}
+
+// The current, finite list of separator sequences we might use instead of '\\'.
+static const wchar_t* sep_replacements[] = {
+ L"\\", L"\\/", L"/", L"//", L"\\\\/\\", L"//\\/"
+};
+
+// Takes a path and modifies it in a way that it should still designate the same file.
+static bool unnormalize_path(wchar_t* result, size_t size, bool is_dir, const wchar_t* path) {
+ wchar_t* dest = result;
+ const wchar_t* src = path;
+ const wchar_t* path_start;
+
+ if (wcsncmp(src, L"\\\\?\\UNC\\", 8) == 0) {
+ path_start = src + 8;
+ } else if (wcsncmp(src, L"\\\\?\\", 4) == 0) {
+ if (src[5] == L':') {
+ path_start = src + 6;
+ } else {
+ path_start = wcschr(src + 4, L'\\');
+ }
+ } else if (wcsncmp(src, L"\\\\", 2) == 0) {
+ path_start = wcschr(src + 2, L'?');
+
+ if (path_start == NULL) {
+ path_start = wcschr(src + 2, L'\\');
+ } else {
+ path_start = wcschr(path_start, L'\\');
+ }
+ } else {
+ path_start = wcschr(src + 1, L'\\');
+ }
+
+ bool allow_sep_change = (mods_filter & Allow_Sep_Mods) && (os::random() & 1) == 0;
+ bool allow_dot_change = (mods_filter & Allow_Dot_Path) && (os::random() & 1) == 0;
+ bool allow_dotdot_change = (mods_filter & Allow_Dot_Dot_Path) && (os::random() & 1) == 0;
+
+ while ((*src != L'\0') && (result + size > dest)) {
+ wchar_t c = *src;
+ *dest = c;
+ ++src;
+ ++dest;
+
+ if (c == L'\\') {
+ if (allow_sep_change && (os::random() & 3) == 3) {
+ int i = os::random() % (sizeof(sep_replacements) / sizeof(sep_replacements[0]));
+
+ if (i >= 0) {
+ const wchar_t* replacement = sep_replacements[i];
+ dest = my_wcscpy_s(dest - 1, size, result, replacement);
+ }
+ } else if (path_start != NULL) {
+ if (allow_dotdot_change && (src > path_start + 1) && ((os::random() & 7) == 7)) {
+ wchar_t const* last_sep = src - 2;
+
+ while (last_sep[0] != L'\\') {
+ --last_sep;
+ }
+
+ if (last_sep > path_start) {
+ dest = my_wcscpy_s(dest, size, result, L"../");
+ src = last_sep + 1;
+ }
+ } else if (allow_dot_change && (src > path_start + 1) && ((os::random() & 7) == 7)) {
+ dest = my_wcscpy_s(dest, size, result, L"./");
+ }
+ }
+ }
+ }
+
+ while (is_dir && ((os::random() & 15) == 1)) {
+ dest = my_wcscpy_s(dest, size, result, L"/");
+ }
+
+ if (result + size > dest) {
+ *dest = L'\0';
+ }
+
+ // Use this modification only if not too close to the max size.
+ return result + size - 10 > dest;
+}
+
+static void check_dir_impl(wchar_t* path, bool should_be_empty) {
+ char buf[JVM_MAXPATHLEN];
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ struct stat st;
+ EXPECT_EQ(os::stat(buf, &st), 0) << "os::stat failed for \"" << path << "\"";
+ EXPECT_EQ(st.st_mode & S_IFMT, S_IFDIR) << "\"" << path << "\" is not a directory according to os::stat";
+ errno = ERROR_SUCCESS;
+ bool is_empty = os::dir_is_empty(buf);
+ errno_t err = errno;
+ EXPECT_EQ(is_empty, should_be_empty) << "os::dir_is_empty assumed \"" << path << "\" is "
+ << (should_be_empty ? "not ": "") << "empty";
+    EXPECT_EQ(err, ERROR_SUCCESS) << "os::dir_is_empty failed for \"" << path << "\" with errno " << err;
+ }
+}
+
+static void check_file_impl(wchar_t* path) {
+ char buf[JVM_MAXPATHLEN];
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ struct stat st;
+ EXPECT_EQ(os::stat(buf, &st), 0) << "os::stat failed for \"" << path << "\"";
+ EXPECT_EQ(st.st_mode & S_IFMT, S_IFREG) << "\"" << path << "\" is not a regular file according to os::stat";
+ int fd = os::open(buf, O_RDONLY, 0);
+ EXPECT_NE(fd, -1) << "os::open failed for \"" << path << "\" with errno " << errno;
+ if (fd >= 0) {
+ ::close(fd);
+ }
+ }
+}
+
+static void check_file_not_present_impl(wchar_t* path) {
+ char buf[JVM_MAXPATHLEN];
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ struct stat st;
+ int stat_ret;
+ EXPECT_EQ(stat_ret = os::stat(buf, &st), -1) << "os::stat did not fail for \"" << path << "\"";
+ if (stat_ret != -1) {
+ // Only check open if stat not already failed.
+ int fd = os::open(buf, O_RDONLY, 0);
+ EXPECT_EQ(fd, -1) << "os::open did not fail for \"" << path << "\"";
+ if (fd >= 0) {
+ ::close(fd);
+ }
+ }
+ }
+}
+
+static void check_dir(wchar_t* path, bool should_be_empty) {
+ check_dir_impl(path, should_be_empty);
+
+ for (int i = 0; mods_filter != Allow_None && i < mods_per_path; ++i) {
+ wchar_t tmp[JVM_MAXPATHLEN];
+ if (unnormalize_path(tmp, JVM_MAXPATHLEN, true, path)) {
+ check_dir_impl(tmp, should_be_empty);
+ }
+ }
+}
+
+static void check_file(wchar_t* path) {
+ check_file_impl(path);
+
+ // Check os::same_files at least somewhat.
+ char buf[JVM_MAXPATHLEN];
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ wchar_t mod[JVM_MAXPATHLEN];
+
+ if (unnormalize_path(mod, JVM_MAXPATHLEN, false, path)) {
+ char mod_c[JVM_MAXPATHLEN];
+ if (convert_to_cstring(mod_c, JVM_MAXPATHLEN, mod)) {
+        EXPECT_EQ(os::same_files(buf, mod_c), true) << "os::same_files failed for \"" << path << "\" and \"" << mod_c << "\"";
+ }
+ }
+ }
+
+ for (int i = 0; mods_filter != Allow_None && i < mods_per_path; ++i) {
+ wchar_t tmp[JVM_MAXPATHLEN];
+ if (unnormalize_path(tmp, JVM_MAXPATHLEN, false, path)) {
+ check_file_impl(tmp);
+ }
+ }
+}
+
+static void check_file_not_present(wchar_t* path) {
+ check_file_not_present_impl(path);
+
+ for (int i = 0; mods_filter != Allow_None && i < mods_per_path; ++i) {
+ wchar_t tmp[JVM_MAXPATHLEN];
+ if (unnormalize_path(tmp, JVM_MAXPATHLEN, false, path)) {
+ check_file_not_present_impl(tmp);
+ }
+ }
+}
+
+static void record_path(char const* name, char const* len_name, wchar_t* path) {
+ char buf[JVM_MAXPATHLEN];
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ ::testing::Test::RecordProperty(name, buf);
+ os::snprintf(buf, JVM_MAXPATHLEN, "%d", (int) wcslen(path));
+ ::testing::Test::RecordProperty(len_name, buf);
+ }
+}
+
+static void bench_path(wchar_t* path) {
+ char buf[JVM_MAXPATHLEN];
+ int reps = 100000;
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ jlong wtime[2];
+
+ for (int t = 0; t < 2; ++t) {
+ wtime[t] = os::javaTimeNanos();
+
+ for (int i = 0; i < reps; ++i) {
+ bool succ = false;
+ size_t buf_len = strlen(buf);
+ wchar_t* w_path = (wchar_t*) os::malloc(sizeof(wchar_t) * (buf_len + 1), mtInternal);
+
+ if (w_path != NULL) {
+ size_t converted_chars;
+ if (::mbstowcs_s(&converted_chars, w_path, buf_len + 1, buf, buf_len) == ERROR_SUCCESS) {
+ if (t == 1) {
+ wchar_t* tmp = (wchar_t*) os::malloc(sizeof(wchar_t) * JVM_MAXPATHLEN, mtInternal);
+
+ if (tmp) {
+ if (_wfullpath(tmp, w_path, JVM_MAXPATHLEN)) {
+ succ = true;
+ }
+
+ // Note that we really don't use the full path name, but just add the cost of running _wfullpath.
+ os::free(tmp);
+ }
+ if (!succ) {
+ printf("Failed fullpathing \"%s\"\n", buf);
+ return;
+ }
+ succ = false;
+ }
+ HANDLE h = ::CreateFileW(w_path, 0, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+ if (h != INVALID_HANDLE_VALUE) {
+ ::CloseHandle(h);
+ succ = true;
+ }
+ }
+ }
+
+ os::free(w_path);
+ if (!succ) {
+ printf("Failed getting W*attr. \"%s\"\n", buf);
+ return;
+ }
+ }
+
+ wtime[t] = os::javaTimeNanos() - wtime[t];
+ }
+
+ jlong ctime = os::javaTimeNanos();
+
+ for (int i = 0; i < reps; ++i) {
+ HANDLE h = ::CreateFileA(buf, 0, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+ if (h == INVALID_HANDLE_VALUE) {
+ return;
+ }
+
+ ::CloseHandle(h);
+ }
+
+ ctime = os::javaTimeNanos() - ctime;
+
+ printf("\"%s\" %f us for *A, %f us for *W, %f us for *W with fullpath\n", buf,
+ 0.001 * ctime / reps, 0.001 * wtime[0] / reps, 0.001 * wtime[1] / reps);
+ }
+}
+
+static void print_attr_result_for_path(wchar_t* path) {
+ WIN32_FILE_ATTRIBUTE_DATA file_data;
+ struct stat st;
+ char buf[JVM_MAXPATHLEN];
+ wchar_t abs[JVM_MAXPATHLEN];
+
+ _wfullpath(abs, path, JVM_MAXPATHLEN);
+ printf("Checking \"%ls\" (%d chars):\n", path, (int) wcslen(path));
+ printf("_wfullpath %ls (%d chars)\n", abs, (int) wcslen(abs));
+ BOOL bret = ::GetFileAttributesExW(path, GetFileExInfoStandard, &file_data);
+ printf("GetFileAttributesExW() %s\n", bret ? "success" : "failed");
+
+ if (convert_to_cstring(buf, JVM_MAXPATHLEN, path)) {
+ bret = ::GetFileAttributesExA(buf, GetFileExInfoStandard, &file_data);
+ printf("GetFileAttributesExA() %s\n", bret ? "success" : "failed");
+
+ bool succ = os::stat(buf, &st) != -1;
+ printf("os::stat() %s\n", succ ? "success" : "failed");
+ }
+}
+
+static void print_attr_result(wchar_t* format, ...) {
+ va_list argptr;
+ wchar_t buf[JVM_MAXPATHLEN];
+
+ va_start(argptr, format);
+ wvsprintfW(buf, format, argptr);
+ print_attr_result_for_path(buf);
+ va_end(argptr);
+}
+
+#define RECORD_PATH(name) record_path(#name, #name "Len", name)
+#define NAME_PART_50 L"01234567890123456789012345678901234567890123456789"
+#define NAME_PART_250 NAME_PART_50 NAME_PART_50 NAME_PART_50 NAME_PART_50 NAME_PART_50
+
+// Test which tries to find out if the os::stat, os::open, os::same_files and os::dir_is_empty methods
+// can handle long path names correctly.
+TEST_VM(os_windows, handle_long_paths) {
+ static wchar_t cwd[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_rel_path[JVM_MAXPATHLEN];
+ static wchar_t long_rel_path[JVM_MAXPATHLEN];
+ static wchar_t empty_dir_rel_path[JVM_MAXPATHLEN];
+ static wchar_t not_empty_dir_rel_path[JVM_MAXPATHLEN];
+ static wchar_t file_rel_path[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_file_rel_path[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_path[JVM_MAXPATHLEN];
+ static wchar_t empty_dir_path[JVM_MAXPATHLEN];
+ static wchar_t not_empty_dir_path[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_file_path[JVM_MAXPATHLEN];
+ static wchar_t file_path[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_unc_path[JVM_MAXPATHLEN];
+ static wchar_t empty_dir_unc_path[JVM_MAXPATHLEN];
+ static wchar_t not_empty_dir_unc_path[JVM_MAXPATHLEN];
+ static wchar_t nearly_long_file_unc_path[JVM_MAXPATHLEN];
+ static wchar_t file_unc_path[JVM_MAXPATHLEN];
+ static wchar_t root_dir_path[JVM_MAXPATHLEN];
+ static wchar_t root_rel_dir_path[JVM_MAXPATHLEN];
+
+ wchar_t* dir_prefix = L"os_windows_long_paths_dir_";
+ wchar_t* empty_dir_name = L"empty_directory_with_long_path";
+ wchar_t* not_empty_dir_name = L"not_empty_directory_with_long_path";
+ wchar_t* file_name = L"file";
+ wchar_t dir_letter;
+ WIN32_FILE_ATTRIBUTE_DATA file_data;
+ bool can_test_unc = false;
+
+ get_current_dir_w(cwd, sizeof(cwd) / sizeof(wchar_t));
+ dir_letter = (cwd[1] == L':' ? cwd[0] : L'\0');
+ int cwd_len = (int) wcslen(cwd);
+ int dir_prefix_len = (int) wcslen(dir_prefix);
+ int rel_path_len = MAX2(dir_prefix_len, 235 - cwd_len);
+
+ memcpy(nearly_long_rel_path, dir_prefix, sizeof(wchar_t) * dir_prefix_len);
+
+ for (int i = dir_prefix_len; i < rel_path_len; ++i) {
+ nearly_long_rel_path[i] = L'L';
+ }
+
+ nearly_long_rel_path[rel_path_len] = L'\0';
+
+ wsprintfW(long_rel_path, L"%ls\\%ls", nearly_long_rel_path, NAME_PART_250);
+ wsprintfW(empty_dir_rel_path, L"%ls\\%ls", nearly_long_rel_path, empty_dir_name);
+ wsprintfW(not_empty_dir_rel_path, L"%ls\\%ls", nearly_long_rel_path, not_empty_dir_name);
+ wsprintfW(nearly_long_file_rel_path, L"%ls\\%ls", nearly_long_rel_path, file_name);
+ wsprintfW(file_rel_path, L"%ls\\%ls\\%ls", nearly_long_rel_path, not_empty_dir_name, file_name);
+ wsprintfW(nearly_long_path, L"\\\\?\\%ls\\%ls", cwd, nearly_long_rel_path);
+ wsprintfW(empty_dir_path, L"%ls\\%ls", nearly_long_path, empty_dir_name);
+ wsprintfW(not_empty_dir_path, L"%ls\\%ls", nearly_long_path, not_empty_dir_name);
+ wsprintfW(nearly_long_file_path, L"%ls\\%ls", nearly_long_path, file_name);
+ wsprintfW(file_path, L"%ls\\%ls\\%ls", nearly_long_path, not_empty_dir_name, file_name);
+ wsprintfW(nearly_long_unc_path, L"\\\\localhost\\%lc$\\%s", dir_letter, nearly_long_path + 7);
+ wsprintfW(empty_dir_unc_path, L"%s\\%s", nearly_long_unc_path, empty_dir_name);
+ wsprintfW(not_empty_dir_unc_path, L"%s\\%s", nearly_long_unc_path, not_empty_dir_name);
+ wsprintfW(nearly_long_file_unc_path, L"%ls\\%ls", nearly_long_unc_path, file_name);
+ wsprintfW(file_unc_path, L"%s\\%s\\%s", nearly_long_unc_path, not_empty_dir_name, file_name);
+ wsprintfW(root_dir_path, L"%lc:\\", dir_letter);
+ wsprintfW(root_rel_dir_path, L"%lc:", dir_letter);
+
+ RECORD_PATH(long_rel_path);
+ RECORD_PATH(nearly_long_rel_path);
+ RECORD_PATH(nearly_long_path);
+ RECORD_PATH(nearly_long_unc_path);
+ RECORD_PATH(empty_dir_rel_path);
+ RECORD_PATH(empty_dir_path);
+ RECORD_PATH(empty_dir_unc_path);
+ RECORD_PATH(not_empty_dir_rel_path);
+ RECORD_PATH(not_empty_dir_path);
+ RECORD_PATH(not_empty_dir_unc_path);
+ RECORD_PATH(nearly_long_file_rel_path);
+ RECORD_PATH(nearly_long_file_path);
+ RECORD_PATH(nearly_long_file_unc_path);
+ RECORD_PATH(file_rel_path);
+ RECORD_PATH(file_path);
+ RECORD_PATH(file_unc_path);
+
+ create_rel_directory_w(nearly_long_rel_path);
+ create_rel_directory_w(long_rel_path);
+ create_rel_directory_w(empty_dir_rel_path);
+ create_rel_directory_w(not_empty_dir_rel_path);
+ create_rel_file_w(nearly_long_file_rel_path);
+ create_rel_file_w(file_rel_path);
+
+ // For UNC path test we assume that the current DRIVE has a share
+ // called "<DRIVELETTER>$" (so for D: we expect \\localhost\D$ to be
+ // the same). Since this is only an assumption, we have to skip
+ // the UNC tests if the share is missing.
+ if (dir_letter && !::GetFileAttributesExW(nearly_long_unc_path, GetFileExInfoStandard, &file_data)) {
+ printf("Disabled UNC path test, since %lc: is not mapped as share %lc$.\n", dir_letter, dir_letter);
+ } else {
+ can_test_unc = true;
+ }
+
+ if (mode == BENCH) {
+ bench_path(nearly_long_path + 4);
+ bench_path(nearly_long_rel_path);
+ bench_path(nearly_long_file_path + 4);
+ bench_path(nearly_long_file_rel_path);
+ } else if (mode == EXAMPLES) {
+ printf("Working directory: %ls", cwd);
+
+ if (dir_letter) {
+ static wchar_t top_buf[JVM_MAXPATHLEN];
+ wchar_t* top_path = wcschr(cwd + 3, L'\\');
+
+ if (top_path) {
+ size_t top_len = (top_path - cwd) - 3;
+
+ memcpy(top_buf, cwd + 3, top_len * 2);
+ top_buf[top_len] = L'\0';
+ top_path = top_buf;
+ }
+
+ print_attr_result(L"%lc:\\", dir_letter);
+ print_attr_result(L"%lc:\\.\\", dir_letter);
+
+ if (top_path) {
+ print_attr_result(L"%lc:\\%ls\\..\\%ls\\", dir_letter, top_path, top_path);
+ }
+
+ print_attr_result(L"%lc:", dir_letter);
+ print_attr_result(L"%lc:.", dir_letter);
+ print_attr_result(L"%lc:\\COM1", dir_letter);
+ print_attr_result(L"%lc:\\PRN", dir_letter);
+ print_attr_result(L"%lc:\\PRN\\COM1", dir_letter);
+ print_attr_result(L"\\\\?\\UNC\\localhost\\%lc$\\", dir_letter);
+ print_attr_result(L"\\\\?\\UNC\\\\localhost\\%lc$\\", dir_letter);
+ print_attr_result(nearly_long_unc_path);
+ print_attr_result(L"%ls\\.\\", nearly_long_unc_path);
+ print_attr_result(L"%ls\\..\\%ls", nearly_long_unc_path, nearly_long_rel_path);
+ print_attr_result(L"\\\\?\\UNC\\%ls", nearly_long_unc_path + 2);
+ print_attr_result(file_unc_path);
+ print_attr_result(L"%ls\\%ls\\..\\%ls\\%ls", nearly_long_unc_path, not_empty_dir_name, not_empty_dir_name, file_name);
+ print_attr_result(L"%ls\\%ls\\.\\%ls", nearly_long_unc_path, not_empty_dir_name, file_name);
+ print_attr_result(L"\\\\?\\UNC\\%ls", file_unc_path + 2);
+ print_attr_result(L"\\\\?\\UNC\\%ls\\%ls\\.\\%ls", nearly_long_unc_path + 2, not_empty_dir_name, file_name);
+ print_attr_result(L"\\\\?\\UNC\\%ls\\%ls\\..\\%ls\\%ls", nearly_long_unc_path + 2, not_empty_dir_name, not_empty_dir_name, file_name);
+ }
+
+ print_attr_result(nearly_long_rel_path);
+ print_attr_result(L"%ls\\.\\", nearly_long_rel_path);
+ print_attr_result(L"%ls\\..\\%ls", nearly_long_rel_path, nearly_long_rel_path);
+ print_attr_result(L"%\\\\?\\%ls", nearly_long_rel_path);
+ print_attr_result(L"\\\\?\\%ls\\.\\", nearly_long_rel_path);
+ print_attr_result(L"\\\\?\\%ls\\..\\%ls", nearly_long_rel_path, nearly_long_rel_path);
+
+ print_attr_result(nearly_long_path + 4);
+ print_attr_result(L"%ls\\.\\", nearly_long_path + 4);
+ print_attr_result(L"%ls\\..\\%ls", nearly_long_path + 4, nearly_long_rel_path);
+ print_attr_result(nearly_long_path);
+ print_attr_result(L"%ls\\.\\", nearly_long_path);
+ print_attr_result(L"%ls\\..\\%ls", nearly_long_path, nearly_long_rel_path);
+ } else {
+ check_file_not_present(L"");
+
+ // Check relative paths
+ check_dir(nearly_long_rel_path, false);
+ check_dir(long_rel_path, true);
+ check_dir(empty_dir_rel_path, true);
+ check_dir(not_empty_dir_rel_path, false);
+ check_file(nearly_long_file_rel_path);
+ check_file(file_rel_path);
+
+ // Check absolute paths
+ if (dir_letter) {
+ check_dir(root_dir_path, false);
+ check_dir(root_rel_dir_path, false);
+ }
+
+ check_dir(cwd, false);
+ check_dir(nearly_long_path + 4, false);
+ check_dir(empty_dir_path + 4, true);
+ check_dir(not_empty_dir_path + 4, false);
+ check_file(nearly_long_file_path + 4);
+ check_file(file_path + 4);
+
+ // Check UNC paths
+ if (can_test_unc) {
+ check_dir(nearly_long_unc_path, false);
+ check_dir(empty_dir_unc_path, true);
+ check_dir(not_empty_dir_unc_path, false);
+ check_file(nearly_long_file_unc_path);
+ check_file(file_unc_path);
+ }
+
+ // Check handling of <DRIVE>:/../<OTHER_DRIVE>:/path/...
+ // The other drive letter should not overwrite the original one.
+ if (dir_letter) {
+ static wchar_t tmp[JVM_MAXPATHLEN];
+ wchar_t* other_letter = dir_letter == L'D' ? L"C" : L"D";
+ wsprintfW(tmp, L"%2ls\\..\\%ls:%ls", nearly_long_file_path, other_letter, nearly_long_file_path + 2);
+ check_file_not_present(tmp);
+ wsprintfW(tmp, L"%2ls\\..\\%ls:%ls", file_path, other_letter, file_path + 2);
+ check_file_not_present(tmp);
+ }
+ }
+
+ delete_rel_file_w(file_rel_path);
+ delete_rel_file_w(nearly_long_file_rel_path);
+ delete_empty_rel_directory_w(not_empty_dir_rel_path);
+ delete_empty_rel_directory_w(empty_dir_rel_path);
+ delete_empty_rel_directory_w(long_rel_path);
+ delete_empty_rel_directory_w(nearly_long_rel_path);
+}
+
#endif
--- a/test/hotspot/jtreg/ProblemList-graal.txt Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/ProblemList-graal.txt Fri Oct 11 12:08:01 2019 +0530
@@ -236,7 +236,6 @@
runtime/exceptionMsgs/AbstractMethodError/AbstractMethodErrorTest.java 8222582 generic-all
# Graal unit tests
-org.graalvm.compiler.core.test.CheckGraalInvariants 8205081
org.graalvm.compiler.core.test.OptionsVerifierTest 8205081
org.graalvm.compiler.hotspot.test.CompilationWrapperTest 8205081
org.graalvm.compiler.replacements.test.classfile.ClassfileBytecodeProviderTest 8205081
--- a/test/hotspot/jtreg/ProblemList.txt Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/ProblemList.txt Fri Oct 11 12:08:01 2019 +0530
@@ -206,16 +206,4 @@
vmTestbase/nsk/jdwp/ThreadReference/ForceEarlyReturn/forceEarlyReturn001/forceEarlyReturn001.java 7199837 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/SynchronizerLockingThreads/SynchronizerLockingThreads001/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/SynchronizerLockingThreads/SynchronizerLockingThreads002/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/SynchronizerLockingThreads/SynchronizerLockingThreads003/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/SynchronizerLockingThreads/SynchronizerLockingThreads004/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/SynchronizerLockingThreads/SynchronizerLockingThreads005/TestDescription.java 8231032 generic-all
-
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/Multi/Multi001/Multi001.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/Multi/Multi002/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/Multi/Multi003/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/Multi/Multi004/TestDescription.java 8231032 generic-all
-vmTestbase/nsk/monitoring/ThreadMXBean/ThreadInfo/Multi/Multi005/TestDescription.java 8231032 generic-all
-
#############################################################################
--- a/test/hotspot/jtreg/compiler/c2/cr6340864/TestDoubleVect.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/c2/cr6340864/TestDoubleVect.java Fri Oct 11 12:08:01 2019 +0530
@@ -87,6 +87,9 @@
test_divv(a0, a1, -VALUE);
test_diva(a0, a1, a3);
test_negc(a0, a1);
+ test_rint(a0, a1);
+ test_ceil(a0, a1);
+ test_floor(a0, a1);
}
// Test and verify results
System.out.println("Verification");
@@ -351,6 +354,56 @@
errn += verify("test_negc: ", i, a0[i], (double)(-((double)(ADD_INIT+i))));
}
+ // Test corner cases: negative zero, positive zero and 2^53.
+ double [] other_corner_cases = { -0.0, 0.0, 9.007199254740992E15 };
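+ // 9.007199254740992E15 is 2^53: doubles at that magnitude are already integral, so floor/ceil/rint must return the value unchanged.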
+ double [] other_corner_cases_res = new double[3];
+ test_floor(a0, a1);
+ errn += verify("test_floor: ", 0, a0[0], Double.NaN);
+ errn += verify("test_floor: ", 1, a0[1], Double.POSITIVE_INFINITY);
+ errn += verify("test_floor: ", 2, a0[2], Double.NEGATIVE_INFINITY);
+ errn += verify("test_floor: ", 3, a0[3], Double.MAX_VALUE);
+ errn += verify("test_floor: ", 4, a0[4], 0.0);
+ errn += verify("test_floor: ", 5, a0[5], 0.0);
+ for (int i=6; i<ARRLEN; i++) {
+ errn += verify("test_floor: ", i, a0[i], ((double)(ADD_INIT+i)));
+ }
+ test_floor_cc(other_corner_cases_res, other_corner_cases);
+ errn += verify("test_floor_cc: ", 0, other_corner_cases_res[0], -0.0);
+ errn += verify("test_floor_cc: ", 1, other_corner_cases_res[1], 0.0);
+ errn += verify("test_floor_cc: ", 2, other_corner_cases_res[2], 9.007199254740992E15);
+
+ test_ceil(a0, a1);
+ errn += verify("test_ceil: ", 0, a0[0], Double.NaN);
+ errn += verify("test_ceil: ", 1, a0[1], Double.POSITIVE_INFINITY);
+ errn += verify("test_ceil: ", 2, a0[2], Double.NEGATIVE_INFINITY);
+ errn += verify("test_ceil: ", 3, a0[3], Double.MAX_VALUE);
+ errn += verify("test_ceil: ", 4, a0[4], 1.0);
+ errn += verify("test_ceil: ", 5, a0[5], 1.0);
+ for (int i=6; i<ARRLEN; i++) {
+ errn += verify("test_ceil: ", i, a0[i], ((double)(ADD_INIT+i+1.0)));
+ }
+ test_ceil_cc(other_corner_cases_res, other_corner_cases);
+ errn += verify("test_ceil_cc: ", 0, other_corner_cases_res[0], -0.0);
+ errn += verify("test_ceil_cc: ", 1, other_corner_cases_res[1], 0.0);
+ errn += verify("test_ceil_cc: ", 2, other_corner_cases_res[2], 9.007199254740992E15);
+
+ test_rint(a0, a1);
+ errn += verify("test_rint: ", 0, a0[0], Double.NaN);
+ errn += verify("test_rint: ", 1, a0[1], Double.POSITIVE_INFINITY);
+ errn += verify("test_rint: ", 2, a0[2], Double.NEGATIVE_INFINITY);
+ errn += verify("test_rint: ", 3, a0[3], Double.MAX_VALUE);
+ errn += verify("test_rint: ", 4, a0[4], 0.0);
+ errn += verify("test_rint: ", 5, a0[5], 0.0);
+ for (int i=6; i<ARRLEN; i++) {
+ if ( i <= 500 )
+ errn += verify("test_rint: ", i, a0[i], ((double)(ADD_INIT+i)));
+ else
+ errn += verify("test_rint: ", i, a0[i], ((double)(ADD_INIT+i+1.0)));
+ }
+ test_rint_cc(other_corner_cases_res, other_corner_cases);
+ errn += verify("test_rint_cc: ", 0, other_corner_cases_res[0], -0.0);
+ errn += verify("test_rint_cc: ", 1, other_corner_cases_res[1], 0.0);
+ errn += verify("test_rint_cc: ", 2, other_corner_cases_res[2], 9.007199254740992E15);
}
if (errn > 0)
@@ -577,6 +630,37 @@
}
}
+ static void test_rint(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.rint(a1[i] + ((double)(i))/1000);
+ }
+ }
+ static void test_ceil(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.ceil(a1[i] + ((double)(i))/1000);
+ }
+ }
+ static void test_floor(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.floor(a1[i] + ((double)(i))/1000);
+ }
+ }
+ static void test_rint_cc(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.rint(a1[i]);
+ }
+ }
+ static void test_ceil_cc(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.ceil(a1[i]);
+ }
+ }
+ static void test_floor_cc(double[] a0, double[] a1) {
+ for (int i = 0; i < a0.length; i+=1) {
+ a0[i] = Math.floor(a1[i]);
+ }
+ }
+
static int verify(String text, int i, double elem, double val) {
if (elem != val && !(Double.isNaN(elem) && Double.isNaN(val))) {
System.err.println(text + "[" + i + "] = " + elem + " != " + val);
--- a/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationAllTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,12 +33,14 @@
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- * -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ * -XX:+WhiteBoxAPI
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:-DeoptimizeRandom
* -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
* -XX:-SegmentedCodeCache
* compiler.codecache.stress.UnexpectedDeoptimizationAllTest
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- * -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ * -XX:+WhiteBoxAPI
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:-DeoptimizeRandom
* -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
* -XX:+SegmentedCodeCache
* compiler.codecache.stress.UnexpectedDeoptimizationAllTest
--- a/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/codecache/stress/UnexpectedDeoptimizationTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,12 +33,14 @@
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- * -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ * -XX:+WhiteBoxAPI
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:-DeoptimizeRandom
* -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
* -XX:-SegmentedCodeCache
* compiler.codecache.stress.UnexpectedDeoptimizationTest
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
- * -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ * -XX:+WhiteBoxAPI
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:-DeoptimizeRandom
* -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
* -XX:+SegmentedCodeCache
* compiler.codecache.stress.UnexpectedDeoptimizationTest
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/escapeAnalysis/TestEliminateLocksOffCrash.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8227384
+ * @summary C2 compilation fails with "graph should be schedulable" when running with -XX:-EliminateLocks
+ *
+ * @run main/othervm -XX:-EliminateLocks TestEliminateLocksOffCrash
+ */
+
+public class TestEliminateLocksOffCrash {
+ public static void main(String[] args) {
+ for (int i = 0; i < 20_000; i++) {
+ try {
+ test();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ private static void test() throws Exception {
+ Object obj = new Object();
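+ // The synchronized block below always exits by throwing, exercising the monitor release on the exception path.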
+ synchronized (obj) {
+ throw new Exception();
+ }
+ }
+}
--- a/test/hotspot/jtreg/compiler/escapeAnalysis/TestSelfArrayCopy.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/escapeAnalysis/TestSelfArrayCopy.java Fri Oct 11 12:08:01 2019 +0530
@@ -23,10 +23,10 @@
/*
* @test
- * @bug 8229016
+ * @bug 8229016 8231055
* @summary Test correct elimination of array allocation with arraycopy to itself.
* @library /test/lib
- * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,compiler.escapeAnalysis.TestSelfArrayCopy::test
+ * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,compiler.escapeAnalysis.TestSelfArrayCopy::test*
* compiler.escapeAnalysis.TestSelfArrayCopy
*/
@@ -39,7 +39,7 @@
private static final int rI1 = Utils.getRandomInstance().nextInt();
private static final int rI2 = Utils.getRandomInstance().nextInt();
- private static int test() {
+ private static int test1() {
// Non-escaping allocation
Integer[] array = {rI1, rI2};
// Arraycopy with src == dst
@@ -51,14 +51,40 @@
return array[0] + array[1];
}
+ private static int test2() {
+ // Non-escaping allocation
+ Integer[] array = {rI1, rI2};
+ // Arraycopy with src == dst
+ System.arraycopy(array, 0, array, 1, 1);
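+ // Copies array[0] over array[1], so the expected result below is rI1 + rI1.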
+ if (b) {
+ // Uncommon trap
+ System.out.println(array[0]);
+ }
+ return array[0] + array[1];
+ }
+
public static void main(String[] args) {
- int expected = rI1 + rI2;
+ int expected1 = rI1 + rI2;
+ int expected2 = rI1 + rI1;
// Trigger compilation
for (int i = 0; i < 20_000; ++i) {
- int result = test();
- if (result != expected) {
- throw new RuntimeException("Incorrect result: " + result + " != " + expected);
+ int result = test1();
+ if (result != expected1) {
+ throw new RuntimeException("Incorrect result: " + result + " != " + expected1);
+ }
+ result = test2();
+ if (result != expected2) {
+ throw new RuntimeException("Incorrect result: " + result + " != " + expected2);
}
}
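+ // Flip the flag so the compiled methods take the uncommon trap path.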
+ b = true;
+ int result = test1();
+ if (result != expected1) {
+ throw new RuntimeException("Incorrect result: " + result + " != " + expected1);
+ }
+ result = test2();
+ if (result != expected2) {
+ throw new RuntimeException("Incorrect result: " + result + " != " + expected2);
+ }
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/graalunit/CoreAarch64Test.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary
+ * @requires vm.opt.final.EnableJVMCI == true
+ *
+ * @modules jdk.internal.vm.compiler
+ *
+ * @library /test/lib /compiler/graalunit /
+ *
+ * @build compiler.graalunit.common.GraalUnitTestLauncher
+ *
+ * @run driver jdk.test.lib.FileInstaller ../../ProblemList-graal.txt ExcludeList.txt
+ *
+ * @run main compiler.graalunit.common.GraalUnitTestLauncher -prefix org.graalvm.compiler.core.aarch64.test -exclude ExcludeList.txt
+ */
+
+/* DO NOT MODIFY THIS FILE. GENERATED BY generateTests.sh */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/graalunit/CoreJdk9Test.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary
+ * @requires vm.opt.final.EnableJVMCI == true
+ *
+ * @modules jdk.internal.vm.compiler
+ *
+ * @library /test/lib /compiler/graalunit /
+ *
+ * @build compiler.graalunit.common.GraalUnitTestLauncher
+ *
+ * @run driver jdk.test.lib.FileInstaller ../../ProblemList-graal.txt ExcludeList.txt
+ *
+ * @run main compiler.graalunit.common.GraalUnitTestLauncher -prefix org.graalvm.compiler.core.jdk9.test -exclude ExcludeList.txt
+ */
+
+/* DO NOT MODIFY THIS FILE. GENERATED BY generateTests.sh */
--- a/test/hotspot/jtreg/compiler/graalunit/EA9Test.java Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary
- * @requires vm.opt.final.EnableJVMCI == true
- *
- * @modules jdk.internal.vm.compiler
- *
- * @library /test/lib /compiler/graalunit /
- *
- * @build compiler.graalunit.common.GraalUnitTestLauncher
- *
- * @run driver jdk.test.lib.FileInstaller ../../ProblemList-graal.txt ExcludeList.txt
- *
- * @run main compiler.graalunit.common.GraalUnitTestLauncher -prefix org.graalvm.compiler.core.jdk9.test.ea -exclude ExcludeList.txt
- */
-
-/* DO NOT MODIFY THIS FILE. GENERATED BY generateTests.sh */
--- a/test/hotspot/jtreg/compiler/graalunit/TestPackages.txt Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/TestPackages.txt Fri Oct 11 12:08:01 2019 +0530
@@ -4,18 +4,19 @@
AsmAarch64 org.graalvm.compiler.asm.aarch64.test
AsmAmd64 org.graalvm.compiler.asm.amd64.test
AsmSparc org.graalvm.compiler.asm.sparc.test
-CoreAmd64 org.graalvm.compiler.core.amd64.test
Core org.graalvm.compiler.core.test @requires !vm.graal.enabled
EA org.graalvm.compiler.core.test.ea
-EA9 org.graalvm.compiler.core.jdk9.test.ea
+CoreAmd64 org.graalvm.compiler.core.amd64.test
+CoreAarch64 org.graalvm.compiler.core.aarch64.test
+CoreJdk9 org.graalvm.compiler.core.jdk9.test
Debug org.graalvm.compiler.debug.test
Graph org.graalvm.compiler.graph.test @requires vm.graal.enabled
+Hotspot org.graalvm.compiler.hotspot.test
HotspotAarch64 org.graalvm.compiler.hotspot.aarch64.test
HotspotAmd64 org.graalvm.compiler.hotspot.amd64.test
HotspotJdk9 org.graalvm.compiler.hotspot.jdk9.test
HotspotSparc org.graalvm.compiler.hotspot.sparc.test @requires vm.simpleArch == "sparcv9"
HotspotLir org.graalvm.compiler.hotspot.lir.test
-Hotspot org.graalvm.compiler.hotspot.test
Loop org.graalvm.compiler.loop.test
Nodes org.graalvm.compiler.nodes.test @requires vm.graal.enabled
Options org.graalvm.compiler.options.test
--- a/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/FindClassesByAnnotatedMethods.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/FindClassesByAnnotatedMethods.java Fri Oct 11 12:08:01 2019 +0530
@@ -73,7 +73,7 @@
System.out.print(jarFilePath);
while (e.hasMoreElements()) {
JarEntry je = e.nextElement();
- if (je.isDirectory() || !je.getName().endsWith(".class")) {
+ if (je.isDirectory() || !je.getName().endsWith(".class") || je.getName().equals("module-info.class")) {
continue;
}
Set<String> methodAnnotationTypes = new HashSet<>();
@@ -84,6 +84,8 @@
} catch (UnsupportedClassVersionError ucve) {
isSupported = false;
unsupportedClasses++;
+ } catch (Throwable t) {
+ throw new InternalError("Error while parsing class from " + je + " in " + jarFilePath, t);
}
String className = je.getName().substring(0, je.getName().length() - ".class".length()).replaceAll("/", ".");
if (!isSupported) {
@@ -129,7 +131,7 @@
/*
* Small bytecode parser that extract annotations.
*/
- private static final int MAJOR_VERSION_JAVA7 = 51;
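+ // Class file major version 50 corresponds to Java 6 (major = 44 + feature release, see MAJOR_VERSION_OFFSET below).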
+ private static final int MAJOR_VERSION_JAVA6 = 50;
private static final int MAJOR_VERSION_OFFSET = 44;
private static final byte CONSTANT_Utf8 = 1;
private static final byte CONSTANT_Integer = 3;
@@ -146,6 +148,8 @@
private static final byte CONSTANT_MethodType = 16;
private static final byte CONSTANT_Dynamic = 17;
private static final byte CONSTANT_InvokeDynamic = 18;
+ private static final byte CONSTANT_Module = 19;
+ private static final byte CONSTANT_Package = 20;
private static void readClassfile(DataInputStream stream, Collection<String> methodAnnotationTypes) throws IOException {
// magic
@@ -154,7 +158,7 @@
int minor = stream.readUnsignedShort();
int major = stream.readUnsignedShort();
- if (major < MAJOR_VERSION_JAVA7) {
+ if (major < MAJOR_VERSION_JAVA6) {
throw new UnsupportedClassVersionError("Unsupported class file version: " + major + "." + minor);
}
// Starting with JDK8, ignore a classfile that has a newer format than the current JDK.
@@ -210,7 +214,9 @@
switch (tag) {
case CONSTANT_Class:
case CONSTANT_String:
- case CONSTANT_MethodType: {
+ case CONSTANT_MethodType:
+ case CONSTANT_Module:
+ case CONSTANT_Package: {
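+ // CONSTANT_Module and CONSTANT_Package entries (JDK 9+ class files) hold a single two-byte name index, so they are skipped like CONSTANT_Class.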
skipFully(stream, 2);
break;
}
--- a/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/JLModule.java Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.mxtool.junit;
-
-import java.lang.reflect.Method;
-import java.util.Arrays;
-import java.util.Set;
-
-/**
- * Facade for the {@code java.lang.Module} class introduced in JDK9 that allows tests to be
- * developed against JDK8 but use module logic if deployed on JDK9.
- */
-class JLModule {
-
- private final Object realModule;
-
- JLModule(Object module) {
- this.realModule = module;
- }
-
- private static final Class<?> moduleClass;
- private static final Class<?> layerClass;
-
- private static final Method bootMethod;
- private static final Method modulesMethod;
- private static final Method getModuleMethod;
- private static final Method getUnnamedModuleMethod;
- private static final Method getNameMethod;
- private static final Method getPackagesMethod;
- private static final Method isExportedMethod;
- private static final Method isExported2Method;
- private static final Method addExportsMethod;
- private static final Method addOpensMethod;
- static {
- try {
- moduleClass = findModuleClass();
- Class<?> modulesClass = Class.forName("jdk.internal.module.Modules");
- layerClass = findModuleLayerClass();
- bootMethod = layerClass.getMethod("boot");
- modulesMethod = layerClass.getMethod("modules");
- getModuleMethod = Class.class.getMethod("getModule");
- getUnnamedModuleMethod = ClassLoader.class.getMethod("getUnnamedModule");
- getNameMethod = moduleClass.getMethod("getName");
- getPackagesMethod = moduleClass.getMethod("getPackages");
- isExportedMethod = moduleClass.getMethod("isExported", String.class);
- isExported2Method = moduleClass.getMethod("isExported", String.class, moduleClass);
- addExportsMethod = modulesClass.getDeclaredMethod("addExports", moduleClass, String.class, moduleClass);
- addOpensMethod = getDeclaredMethodOptional(modulesClass, "addOpens", moduleClass, String.class, moduleClass);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- // API change http://hg.openjdk.java.net/jdk9/dev/hotspot/rev/afedee84773e.
- protected static Class<?> findModuleClass() throws ClassNotFoundException {
- try {
- return Class.forName("java.lang.Module");
- } catch (ClassNotFoundException e) {
- return Class.forName("java.lang.reflect.Module");
- }
- }
-
- // API change http://hg.openjdk.java.net/jdk9/dev/hotspot/rev/afedee84773e.
- protected static Class<?> findModuleLayerClass() throws ClassNotFoundException {
- try {
- return Class.forName("java.lang.ModuleLayer");
- } catch (ClassNotFoundException e) {
- return Class.forName("java.lang.reflect.Layer");
- }
- }
-
- private static Method getDeclaredMethodOptional(Class<?> declaringClass, String name, Class<?>... parameterTypes) {
- try {
- return declaringClass.getDeclaredMethod(name, parameterTypes);
- } catch (NoSuchMethodException e) {
- return null;
- }
- }
-
- public static JLModule fromClass(Class<?> cls) {
- try {
- return new JLModule(getModuleMethod.invoke(cls));
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- @SuppressWarnings("unchecked")
- public static JLModule find(String name) {
- try {
- Object bootLayer = bootMethod.invoke(null);
- Set<Object> modules = (Set<Object>) modulesMethod.invoke(bootLayer);
- for (Object m : modules) {
- JLModule module = new JLModule(m);
- String mname = module.getName();
- if (mname.equals(name)) {
- return module;
- }
- }
- } catch (Exception e) {
- throw new InternalError(e);
- }
- return null;
- }
-
- public static JLModule getUnnamedModuleFor(ClassLoader cl) {
- try {
- return new JLModule(getUnnamedModuleMethod.invoke(cl));
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- public String getName() {
- try {
- return (String) getNameMethod.invoke(realModule);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- /**
- * Exports all packages in this module to a given module.
- */
- public void exportAllPackagesTo(JLModule module) {
- if (this != module) {
- for (String pkg : getPackages()) {
- // Export all JVMCI packages dynamically instead
- // of requiring a long list of -XaddExports
- // options on the JVM command line.
- if (!isExported(pkg, module)) {
- addExports(pkg, module);
- addOpens(pkg, module);
- }
- }
- }
- }
-
- @SuppressWarnings("unchecked")
- public Iterable<String> getPackages() {
- try {
- // API change http://hg.openjdk.java.net/jdk9/dev/hotspot/rev/afedee84773e#l1.15
- Object res = getPackagesMethod.invoke(realModule);
- if (res instanceof String[]) {
- return Arrays.asList((String[]) res);
- }
- return (Set<String>) res;
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- public boolean isExported(String pn) {
- try {
- return (Boolean) isExportedMethod.invoke(realModule, pn);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- public boolean isExported(String pn, JLModule other) {
- try {
- return (Boolean) isExported2Method.invoke(realModule, pn, other.realModule);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- public void addExports(String pn, JLModule other) {
- try {
- addExportsMethod.invoke(null, realModule, pn, other.realModule);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
-
- public void addOpens(String pn, JLModule other) {
- if (addOpensMethod != null) {
- try {
- addOpensMethod.invoke(null, realModule, pn, other.realModule);
- } catch (Exception e) {
- throw new AssertionError(e);
- }
- }
- }
-
- @Override
- public String toString() {
- return realModule.toString();
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/ModuleSupport.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.mxtool.junit;
+
+import java.io.PrintStream;
+import java.util.Set;
+
+/**
+ * Interface to {@code java.lang.Module} related functionality.
+ */
+class ModuleSupport {
+
+ /**
+ * @param out stream to use for printing warnings
+ */
+ ModuleSupport(PrintStream out) {
+ }
+
+ /**
+ * Exports and opens packages based on {@code spec}. See further documentation in
+ * {@code mx_unittest.py}.
+ *
+ * @param spec
+ * @param context
+ * @param opened the set of opens performed are added to this set in the format
+ * {@code <module>/<package>=<target-module>(,<target-module>)*} (e.g.
+ * {@code "com.foo/com.foo.util=ALL-NAMED,com.bar"})
+ * @param exported the set of exports performed are added to this set in the format
+ * {@code <module>/<package>=<target-module>(,<target-module>)*} (e.g.
+ * {@code "com.foo/com.foo.util=ALL-NAMED,com.bar"})
+ */
+ void openPackages(String spec, Object context, Set<String> opened, Set<String> exported) {
+ // Nop on JDK 8
+ }
+
+ /**
+ * Updates modules specified in {@code AddExport} annotations on {@code classes} to export and
+ * open packages to the annotation classes' declaring modules.
+ *
+ * @param classes
+ * @param opened the set of opens performed are added to this set in the format
+ * {@code <module>/<package>=<target-module>(,<target-module>)*} (e.g.
+ * {@code "com.foo/com.foo.util=ALL-NAMED,com.bar"})
+ * @param exported the set of exports performed are added to this set in the format
+ * {@code <module>/<package>=<target-module>(,<target-module>)*} (e.g.
+ * {@code "com.foo/com.foo.util=ALL-NAMED,com.bar"})
+ */
+ void processAddExportsAnnotations(Set<Class<?>> classes, Set<String> opened, Set<String> exported) {
+ // Nop on JDK 8
+ }
+}
--- a/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/MxJUnitWrapper.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/MxJUnitWrapper.java Fri Oct 11 12:08:01 2019 +0530
@@ -29,18 +29,13 @@
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
import java.util.ServiceLoader;
import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import java.util.TreeSet;
import org.junit.internal.JUnitSystem;
import org.junit.internal.RealSystem;
@@ -59,6 +54,14 @@
public class MxJUnitWrapper {
+ // Unit tests that start a JVM subprocess can use these system properties to
+ // add --add-exports and --add-opens as necessary to the JVM command line.
+ //
+ // Known usages:
+ // org.graalvm.compiler.test.SubprocessUtil.getPackageOpeningOptions()
+ public static final String OPENED_PACKAGES_PROPERTY_NAME = "com.oracle.mxtool.junit.opens";
+ public static final String EXPORTED_PACKAGES_PROPERTY_NAME = "com.oracle.mxtool.junit.exports";
+
public static class MxJUnitConfig {
public boolean verbose = false;
@@ -136,14 +139,25 @@
String[] expandedArgs = expandArgs(args);
int i = 0;
+ List<String> testSpecs = new ArrayList<>();
+ List<String> openPackagesSpecs = new ArrayList<>();
while (i < expandedArgs.length) {
String each = expandedArgs[i];
if (each.charAt(0) == '-') {
// command line arguments
if (each.contentEquals("-JUnitVerbose")) {
config.verbose = true;
+ config.enableTiming = true;
+ } else if (each.contentEquals("-JUnitOpenPackages")) {
+ if (i + 1 >= expandedArgs.length) {
+ system.out().println("Must include argument for -JUnitAddExports");
+ System.exit(1);
+ }
+ openPackagesSpecs.add(expandedArgs[++i]);
} else if (each.contentEquals("-JUnitVeryVerbose")) {
+ config.verbose = true;
config.veryVerbose = true;
+ config.enableTiming = true;
} else if (each.contentEquals("-JUnitFailFast")) {
config.failFast = true;
} else if (each.contentEquals("-JUnitEnableTiming")) {
@@ -172,21 +186,35 @@
}
} else {
-
- try {
- builder.addTestSpec(each);
- } catch (MxJUnitRequest.BuilderException ex) {
- system.out().println(ex.getMessage());
- System.exit(1);
- }
+ testSpecs.add(each);
}
i++;
}
- MxJUnitRequest request = builder.build();
+ ModuleSupport moduleSupport = new ModuleSupport(system.out());
+ Set<String> opened = new TreeSet<>();
+ Set<String> exported = new TreeSet<>();
+ for (String spec : openPackagesSpecs) {
+ moduleSupport.openPackages(spec, "-JUnitOpenPackages", opened, exported);
+ }
- if (System.getProperty("java.specification.version").compareTo("1.9") >= 0) {
- addExports(request.classes, system.out());
+ for (String spec : testSpecs) {
+ try {
+ builder.addTestSpec(spec);
+ } catch (MxJUnitRequest.BuilderException ex) {
+ system.out().println(ex.getMessage());
+ System.exit(1);
+ }
+ }
+
+ MxJUnitRequest request = builder.build();
+ moduleSupport.processAddExportsAnnotations(request.classes, opened, exported);
+
+ if (!opened.isEmpty()) {
+ System.setProperty(OPENED_PACKAGES_PROPERTY_NAME, String.join(System.lineSeparator(), opened));
+ }
+ if (!exported.isEmpty()) {
+ System.setProperty(EXPORTED_PACKAGES_PROPERTY_NAME, String.join(System.lineSeparator(), exported));
}
for (RunListener p : ServiceLoader.load(RunListener.class)) {
@@ -285,8 +313,6 @@
return result;
}
- private static final Pattern MODULE_PACKAGE_RE = Pattern.compile("([^/]+)/(.+)");
-
private static class Timing<T> implements Comparable<Timing<T>> {
final T subject;
final long value;
@@ -344,93 +370,6 @@
}
/**
- * Adds the super types of {@code cls} to {@code supertypes}.
- */
- private static void gatherSupertypes(Class<?> cls, Set<Class<?>> supertypes) {
- if (!supertypes.contains(cls)) {
- supertypes.add(cls);
- Class<?> superclass = cls.getSuperclass();
- if (superclass != null) {
- gatherSupertypes(superclass, supertypes);
- }
- for (Class<?> iface : cls.getInterfaces()) {
- gatherSupertypes(iface, supertypes);
- }
- }
- }
-
- /**
- * Updates modules specified in {@code AddExport} annotations on {@code classes} to export
- * concealed packages to the annotation classes' declaring modules.
- */
- private static void addExports(Set<Class<?>> classes, PrintStream out) {
- Set<Class<?>> types = new HashSet<>();
- for (Class<?> cls : classes) {
- gatherSupertypes(cls, types);
- }
- for (Class<?> cls : types) {
- Annotation[] annos = cls.getAnnotations();
- for (Annotation a : annos) {
- Class<? extends Annotation> annotationType = a.annotationType();
- if (annotationType.getSimpleName().equals("AddExports")) {
- Optional<String[]> value = getElement("value", String[].class, a);
- if (value.isPresent()) {
- for (String export : value.get()) {
- Matcher m = MODULE_PACKAGE_RE.matcher(export);
- if (m.matches()) {
- String moduleName = m.group(1);
- String packageName = m.group(2);
- JLModule module = JLModule.find(moduleName);
- if (module == null) {
- out.printf("%s: Cannot find module named %s specified in \"AddExports\" annotation: %s%n", cls.getName(), moduleName, a);
- } else {
- if (packageName.equals("*")) {
- module.exportAllPackagesTo(JLModule.fromClass(cls));
- } else {
- module.addExports(packageName, JLModule.fromClass(cls));
- module.addOpens(packageName, JLModule.fromClass(cls));
- }
- }
- } else {
- out.printf("%s: Ignoring \"AddExports\" annotation with value not matching <module>/<package> pattern: %s%n", cls.getName(), a);
- }
- }
- } else {
- out.printf("%s: Ignoring \"AddExports\" annotation without `String value` element: %s%n", cls.getName(), a);
- }
- }
- }
- }
- }
-
- /**
- * Gets the value of the element named {@code name} of type {@code type} from {@code annotation}
- * if present.
- *
- * @return the requested element value wrapped in an {@link Optional} or
- * {@link Optional#empty()} if {@code annotation} has no element named {@code name}
- * @throws AssertionError if {@code annotation} has an element of the given name but whose type
- * is not {@code type} or if there's some problem reading the value via reflection
- */
- private static <T> Optional<T> getElement(String name, Class<T> type, Annotation annotation) {
- Class<? extends Annotation> annotationType = annotation.annotationType();
- Method valueAccessor;
- try {
- valueAccessor = annotationType.getMethod(name);
- if (!valueAccessor.getReturnType().equals(type)) {
- throw new AssertionError(String.format("Element %s of %s is of type %s, not %s ", name, annotationType.getName(), valueAccessor.getReturnType().getName(), type.getName()));
- }
- } catch (NoSuchMethodException e) {
- return Optional.empty();
- }
- try {
- return Optional.of(type.cast(valueAccessor.invoke(annotation)));
- } catch (Exception e) {
- throw new AssertionError(String.format("Could not read %s element from %s", name, annotation), e);
- }
- }
-
- /**
* Expand any arguments starting with @ and return the resulting argument array.
*
* @return the expanded argument array
--- a/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/TextRunListener.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/TextRunListener.java Fri Oct 11 12:08:01 2019 +0530
@@ -74,7 +74,8 @@
}
public static RunListener createRunListener(MxRunListener l) {
- return new TextListener(l.getWriter()) {
+ PrintStream theWriter = l.getWriter();
+ return new TextListener(theWriter) {
private Class<?> lastClass;
private int passedInLastClass;
private int failedInLastClass;
@@ -143,6 +144,15 @@
public void testAssumptionFailure(Failure failure) {
l.testAssumptionFailure(failure);
}
+
+ @Override
+ protected void printFailure(Failure each, String prefix) {
+ // Print out the test message in the same format used to run a single test:
+ // my.package.MyClass#methodName
+ String header = each.getDescription().getClassName() + "#" + each.getDescription().getMethodName();
+ theWriter.println(prefix + ") " + header);
+ theWriter.print(each.getTrace());
+ }
};
}
}
--- a/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/TimingDecorator.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/com.oracle.mxtool.junit/com/oracle/mxtool/junit/TimingDecorator.java Fri Oct 11 12:08:01 2019 +0530
@@ -22,8 +22,8 @@
*/
package com.oracle.mxtool.junit;
-import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import org.junit.runner.Description;
@@ -40,8 +40,8 @@
TimingDecorator(MxRunListener l) {
super(l);
- this.classTimes = new HashMap<>();
- this.testTimes = new HashMap<>();
+ this.classTimes = new ConcurrentHashMap<>();
+ this.testTimes = new ConcurrentHashMap<>();
}
@Override
--- a/test/hotspot/jtreg/compiler/graalunit/common/GraalUnitTestLauncher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/graalunit/common/GraalUnitTestLauncher.java Fri Oct 11 12:08:01 2019 +0530
@@ -229,6 +229,8 @@
javaFlags.add("jdk.internal.vm.compiler,jdk.internal.vm.ci");
javaFlags.add("--add-exports");
javaFlags.add("java.base/jdk.internal.module=ALL-UNNAMED");
+ javaFlags.add("--add-exports");
+ javaFlags.add("java.base/jdk.internal.misc=ALL-UNNAMED");
javaFlags.addAll(getModuleExports("jdk.internal.vm.compiler", "ALL-UNNAMED"));
javaFlags.addAll(getModuleExports("jdk.internal.vm.ci", "ALL-UNNAMED,jdk.internal.vm.compiler"));
--- a/test/hotspot/jtreg/compiler/linkage/TestLinkageErrorInGenerateOopMap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/compiler/linkage/TestLinkageErrorInGenerateOopMap.java Fri Oct 11 12:08:01 2019 +0530
@@ -47,6 +47,8 @@
"-XX:-BytecodeVerificationLocal",
"-XX:-TieredCompilation",
"-XX:CompileCommand=dontinline,compiler/linkage/OSRWithBadOperandStack.m*",
+ "-XX:-CreateCoredumpOnCrash",
+ "-Xmx64m",
"compiler.linkage.TestLinkageErrorInGenerateOopMap", "run"};
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
OutputAnalyzer out = new OutputAnalyzer(pb.start());
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/LoopUnrollBadNodeBudget.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8229499
+ * @summary Node estimate for loop unrolling is not correct/sufficient:
+ * assert(delta <= 2 * required) failed: Bad node estimate ...
+ *
+ * @requires !vm.graal.enabled
+ *
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation
+ * LoopUnrollBadNodeBudget
+ *
+ */
+
+public class LoopUnrollBadNodeBudget {
+
+ int a;
+ long b;
+ int c;
+ int d(long e, short f, int g) {
+ int h, j = 2, k, l[][] = new int[a][];
+ for (h = 8; h < 58; ++h)
+ for (k = 1; 7 > k; ++k)
+ switch (h % 9 * 5 + 43) {
+ case 70:
+ case 65:
+ case 86:
+ case 81:
+ case 62:
+ case 69:
+ case 74:
+ g = j;
+ }
+ long m = u(l);
+ return (int)m;
+ }
+ void n(int p, int o) { d(b, (short)0, p); }
+ void r(String[] q) {
+ int i = 4;
+ n(i, c);
+ }
+ long u(int[][] a) {
+ long sum = 0;
+ return sum;
+ }
+ public static void main(String[] t) {
+ try {
+ LoopUnrollBadNodeBudget s = new LoopUnrollBadNodeBudget();
+ for (int i = 5000; i > 0; i--)
+ s.r(t);
+ } catch (Exception ex) {
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/SplitIfSharedFastLockBehindCastPP.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231620
+ * @summary assert(bol->is_Bool()) crash during split if due to FastLockNode
+ *
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement SplitIfSharedFastLockBehindCastPP
+ */
+
+
+public class SplitIfSharedFastLockBehindCastPP {
+ private static boolean field;
+ private static A obj_field;
+
+ public static void main(String[] args) {
+ A lock = new A();
+ obj_field = lock;
+ for (int i = 0; i < 20_000; i++) {
+ test1(true, lock);
+ test1(false, lock);
+ test2(true);
+ test2(false);
+ }
+ }
+
+ private static void test1(boolean flag, Object obj) {
+ if (obj == null) {
+ }
+
+ boolean flag2;
+ if (flag) {
+ flag2 = true;
+ } else {
+ flag2 = false;
+ obj = obj_field;
+ }
+
+ // This loop will be unswitched. The condition becomes candidate for split if
+ for (int i = 0; i < 100; i++) {
+ if (flag2) {
+ field = true;
+ } else {
+ field = false;
+ }
+ synchronized (obj) {
+ field = true;
+ }
+ }
+ }
+
+ private static Object test2(boolean flag) {
+ int integer;
+ if (flag) {
+ field = true;
+ integer = 1;
+ } else {
+ field = false;
+ integer = 2;
+ }
+
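+ // Autoboxing the int produces the object that the loop below synchronizes on.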
+ Object obj = integer;
+
+ // This loop will be unswitched. The condition becomes candidate for split if
+ for (int i = 0; i < 100; i++) {
+ if (integer == 1) {
+ field = true;
+ } else {
+ field = false;
+ }
+ synchronized (obj) {
+ field = true;
+ }
+ }
+ return obj;
+ }
+
+ private static final class A {
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/TestCMovWithOpaque.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8231223
+ * @summary Test conditional move optimization encountering an Opaque4Node.
+ * @run main/othervm -Xbatch -XX:-TieredCompilation
+ * -XX:CompileCommand=inline,compiler.loopopts.TestCMovWithOpaque::test
+ * compiler.loopopts.TestCMovWithOpaque
+ */
+
+package compiler.loopopts;
+
+public class TestCMovWithOpaque {
+
+ public static void test(int array[]) {
+ for (int i = 1; i < 8; i += 3) {
+ for (int j = 0; j < 4; ++j) {
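+ // Only the i % 4 == 3 case stores to the array, so the update is conditional and a candidate for the conditional-move transformation mentioned in the summary.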
+ switch (i % 4) {
+ case 0:
+ break;
+ case 1:
+ break;
+ case 2:
+ break;
+ case 3:
+ array[j] += 42;
+ break;
+ }
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ int[] array = new int[4];
+ for (int i = 0; i < 20_000; ++i) {
+ test(array);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopstripmining/TestConservativeAntiDep.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8231550
+ * @summary C2: ShouldNotReachHere() in verify_strip_mined_scheduling
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:LoopMaxUnroll=0 TestConservativeAntiDep
+ *
+ */
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+
+public class TestConservativeAntiDep {
+ private static long longField;
+
+ public static void main(String[] args) throws InstantiationException, IllegalAccessException {
+ for (int i = 0; i < 20_000; i++) {
+ test1(A.class);
+ test2(B.class);
+ }
+ }
+
+ private static int test1(Class klass) {
+ Object[] in = (Object[])Array.newInstance(klass, 100);
+
+ Object[] o = in;
+ int v = 1;
+ // CountedLoop has control dependent CastPP
+ for (int i = 0; i < 100 ; i++) {
+ longField = i; // sunk in outer strip mined loop
+ o = (A[]) o;
+ v *= 2;
+ }
+
+ // LoadRange cannot float higher than CountedLoop (because of
+ // CastPP) and is found anti-dependent with long store so
+ // scheduled in outer strip mined loop
+ return v + o.length;
+ }
+
+ private static int test2(Class klass) throws IllegalAccessException, InstantiationException {
+ A in = (A)klass.newInstance();
+
+ A o = in;
+ int v = 1;
+ // CountedLoop has control dependent CastPP
+ for (int i = 0; i < 100 ; i++) {
+ longField = i; // sunk in outer strip mined loop
+ o = (B) o;
+ v *= 2;
+ }
+
+ // Load cannot float higher than CountedLoop (because of
+ // CastPP) and is found anti-dependent with long store so
+ // scheduled in outer strip mined loop
+ return v + o.intField;
+ }
+
+ private static class A {
+ int intField;
+ public A() {}
+ }
+
+ private static class B extends A {
+ public B() {}
+ }
+}
--- a/test/hotspot/jtreg/gc/g1/TestRemsetLoggingThreads.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/gc/g1/TestRemsetLoggingThreads.java Fri Oct 11 12:08:01 2019 +0530
@@ -54,7 +54,7 @@
OutputAnalyzer output = new OutputAnalyzer(pb.start());
- String pattern = "Concurrent RS threads times \\(s\\)$";
+ String pattern = "Concurrent refinement threads times \\(s\\)$";
Matcher m = Pattern.compile(pattern, Pattern.MULTILINE).matcher(output.getStdout());
if (!m.find()) {
--- a/test/hotspot/jtreg/gc/g1/TestRemsetLoggingTools.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/gc/g1/TestRemsetLoggingTools.java Fri Oct 11 12:08:01 2019 +0530
@@ -110,7 +110,7 @@
}
public static void expectRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
- int actualTotal = result.split("concurrent refinement").length - 1;
+ int actualTotal = result.split("concurrent refinement statistics").length - 1;
int actualCumulative = result.split("Cumulative RS summary").length - 1;
if (expectedCumulative != actualCumulative) {
--- a/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java Fri Oct 11 12:08:01 2019 +0530
@@ -32,12 +32,6 @@
* -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4
* -Dtarget=1000
* TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=1000
- * TestGCThreadGroups
*/
/**
@@ -71,46 +65,22 @@
* TestGCThreadGroups
*
* @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=1000
- * TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
* -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static
* -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4
* -Dtarget=1000
* TestGCThreadGroups
*
* @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=1000
- * TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
* -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact
* -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4
* -Dtarget=100
* TestGCThreadGroups
*
* @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=100
- * TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
* -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
* -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4
* -Dtarget=100
* TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=100
- * TestGCThreadGroups
*/
/**
@@ -130,18 +100,6 @@
* -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4
* -Dtarget=1000
* TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=1000
- * TestGCThreadGroups
- *
- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=traversal -XX:ShenandoahGCHeuristics=aggressive
- * -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2
- * -Dtarget=1000
- * TestGCThreadGroups
*/
public class TestGCThreadGroups {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/CallMultipleCatchProjs.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8231405
+ * @summary barrier expansion breaks if barrier is right after call to rethrow stub
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:CompileOnly=CallMultipleCatchProjs::test -Xcomp -Xverify:none -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC CallMultipleCatchProjs
+ *
+ */
+
+public class CallMultipleCatchProjs {
+ private static A field = new A();
+
+ public static void main(String[] args) throws Exception {
+ Exception3 exception3 = new Exception3();
+ test(new Exception2());
+ }
+
+ static int test(Exception exception) throws Exception {
+ try {
+ throw exception;
+ } catch (Exception1 e1) {
+ return 1;
+ } catch (Exception2 e2) {
+ return field.i + 2;
+ } catch (Exception3 e3) {
+ return field.i + 3;
+ }
+ }
+
+ private static class Exception1 extends Exception {
+ }
+
+ private static class Exception2 extends Exception {
+ }
+
+ private static class Exception3 extends Exception {
+ }
+
+ private static class A {
+ public int i;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestClone.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64")
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64")
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC
+ * -XX:+ShenandoahVerify
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+/*
+ * @test TestClone
+ * @summary Test clone barriers work correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled & (vm.bits == "64")
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -Xint
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:-TieredCompilation
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:TieredStopAtLevel=1
+ * TestClone
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g
+ * -XX:-UseCompressedOops
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ * -XX:TieredStopAtLevel=4
+ * TestClone
+ */
+
+
+public class TestClone {
+
+ public static void main(String[] args) throws Exception {
+ for (int i = 0; i < 10000; i++) {
+ Object[] src = new Object[i];
+ for (int c = 0; c < src.length; c++) {
+ src[c] = new Object();
+ }
+ testWith(src);
+ }
+ }
+
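+    // Verify the clone is an exact shallow copy: same length and the same element
+    // references. With Shenandoah enabled, Object[].clone() goes through the clone
+    // barrier, which is what this test exercises.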
+ static void testWith(Object[] src) {
+ Object[] dst = src.clone();
+ int srcLen = src.length;
+ int dstLen = dst.length;
+ if (srcLen != dstLen) {
+ throw new IllegalStateException("Lengths do not match: " + srcLen + " vs " + dstLen);
+ }
+ for (int c = 0; c < src.length; c++) {
+ Object s = src[c];
+ Object d = dst[c];
+ if (s != d) {
+ throw new IllegalStateException("Elements do not match at " + c + ": " + s + " vs " + d);
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousMoves.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestHumongousMoves
+ * @summary Check that Shenandoah reacts correctly to the humongous moves setting
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive
+ * -XX:-ShenandoahHumongousMoves
+ * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify
+ * TestHumongousMoves
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g
+ * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive
+ * -XX:+ShenandoahHumongousMoves
+ * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify
+ * TestHumongousMoves
+ */
+
+import java.util.Random;
+
+public class TestHumongousMoves {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // ~10 GB of total allocation
+
+ static volatile Object sink;
+
+ public static void main(String[] args) throws Exception {
+ final int min = 0;
+ final int max = 384 * 1024;
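+        // Rough allocation budget: the average int[] holds (min + max) / 2 elements of
+        // 4 bytes each, plus roughly 16 bytes of array header (an approximation; the
+        // actual header size depends on VM settings), so 'count' arrays amount to about
+        // TARGET_MB megabytes of allocation.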
+ long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+ Random r = new Random();
+ for (long c = 0; c < count; c++) {
+ sink = new int[min + r.nextInt(max - min)];
+ }
+ }
+
+}
--- a/test/hotspot/jtreg/gc/shenandoah/options/TestThreadCounts.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestThreadCounts.java Fri Oct 11 12:08:01 2019 +0530
@@ -61,7 +61,7 @@
output.shouldHaveExitValue(1);
} else if (conc > par) {
output.shouldContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads");
- output.shouldHaveExitValue(0);
+ output.shouldHaveExitValue(1);
} else {
output.shouldNotContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads");
output.shouldHaveExitValue(0);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestThreadCountsOverride.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestThreadCountsOverride
+ * @summary Test that Shenandoah GC thread counts are overridable
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @run driver TestThreadCountsOverride
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestThreadCountsOverride {
+ public static void main(String[] args) throws Exception {
+ {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseShenandoahGC",
+ "-XX:ParallelGCThreads=1",
+ "-XX:+PrintFlagsFinal",
+ "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
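+            // -XX:+PrintFlagsFinal prints one line per flag, roughly of the form
+            //   uint ParallelGCThreads = 1 {product} {command line}
+            // (the exact layout differs between JDK versions), so match on name and value.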
+ output.shouldMatch("ParallelGCThreads(.*)= 1 ");
+ output.shouldHaveExitValue(0);
+ }
+
+ {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseShenandoahGC",
+ "-XX:ConcGCThreads=1",
+ "-XX:+PrintFlagsFinal",
+ "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ output.shouldMatch("ConcGCThreads(.*)= 1 ");
+ output.shouldHaveExitValue(0);
+ }
+ }
+
+}
--- a/test/hotspot/jtreg/runtime/LoadClass/LongBCP.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/runtime/LoadClass/LongBCP.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
-import jdk.test.lib.Platform;
import jdk.test.lib.compiler.CompilerUtils;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
@@ -126,13 +125,8 @@
bootCP, "Hello");
output = new OutputAnalyzer(pb.start());
- if (!Platform.isWindows()) {
- output.shouldContain("Hello World")
- .shouldHaveExitValue(0);
- } else {
- output.shouldContain("Could not find or load main class Hello")
- .shouldHaveExitValue(1);
- }
+ output.shouldContain("Hello World")
+ .shouldHaveExitValue(0);
// total relative path length exceeds MAX_PATH
destDir = Paths.get(destDir.toString(), "yyyyyyyy");
@@ -144,12 +138,7 @@
bootCP, "Hello");
output = new OutputAnalyzer(pb.start());
- if (!Platform.isWindows()) {
- output.shouldContain("Hello World")
- .shouldHaveExitValue(0);
- } else {
- output.shouldContain("Could not find or load main class Hello")
- .shouldHaveExitValue(1);
- }
+ output.shouldContain("Hello World")
+ .shouldHaveExitValue(0);
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/Safepoint/NoSafepointVerifier.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8184732
+ * @summary Ensure that special locks never safepoint check and are vm_block.
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main NoSafepointVerifier
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+
+import sun.hotspot.WhiteBox;
+
+public class NoSafepointVerifier {
+
+ static void runTest(String test) throws Exception {
+ if (Platform.isDebugBuild()){
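+            // assertSpecialLock() is only meaningful in debug builds (hence the
+            // isDebugBuild() guard). The forked VM is expected to hit the assert,
+            // so core dumps are disabled and only the assertion message is checked.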
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-Xbootclasspath/a:.",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-XX:-CreateCoredumpOnCrash",
+ "NoSafepointVerifier",
+ test);
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldContain(test);
+ }
+ }
+
+ static String test1 = "Special locks or below should never safepoint";
+ static String test2 = "Special locks or below should allow the vm to block";
+ static String test3 = "Possible safepoint reached by thread that does not allow it";
+
+ public static void main(String args[]) throws Exception {
+ if (args.length > 0) {
+ if (args[0].equals(test1)) {
+ WhiteBox.getWhiteBox().assertSpecialLock(/*vm_block*/true, /*safepoint_check_always*/true);
+ } else if (args[0].equals(test2)) {
+ WhiteBox.getWhiteBox().assertSpecialLock(/*vm_block*/false, /*safepoint_check_always*/false);
+ } else if (args[0].equals(test3)) {
+ WhiteBox.getWhiteBox().assertSpecialLock(/*vm_block*/true, /*safepoint_check_always*/false);
+ }
+ } else {
+ runTest(test1);
+ runTest(test2);
+ runTest(test3);
+ }
+ }
+}
--- a/test/hotspot/jtreg/runtime/cds/appcds/LotsOfClasses.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/runtime/cds/appcds/LotsOfClasses.java Fri Oct 11 12:08:01 2019 +0530
@@ -39,7 +39,7 @@
public class LotsOfClasses {
- public static void main(String[] args) throws Throwable {
+ public static void main(String[] args) throws Exception {
ArrayList<String> list = new ArrayList<>();
TestCommon.findAllClasses(list);
--- a/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java Fri Oct 11 12:08:01 2019 +0530
@@ -611,7 +611,7 @@
static Pattern pattern;
- static void findAllClasses(ArrayList<String> list) throws Throwable {
+ static void findAllClasses(ArrayList<String> list) throws Exception {
// Find all the classes in the jrt file system
pattern = Pattern.compile("/modules/[a-z.]*[a-z]+/([^-]*)[.]class");
FileSystem fs = FileSystems.getFileSystem(URI.create("jrt:/"));
@@ -619,7 +619,7 @@
findAllClassesAtPath(base, list);
}
- private static void findAllClassesAtPath(Path p, ArrayList<String> list) throws Throwable {
+ private static void findAllClassesAtPath(Path p, ArrayList<String> list) throws Exception {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(p)) {
for (Path entry: stream) {
Matcher matcher = pattern.matcher(entry.toString());
@@ -629,7 +629,7 @@
}
try {
findAllClassesAtPath(entry, list);
- } catch (Throwable t) {}
+ } catch (Exception ex) {}
}
}
}
--- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicLotsOfClasses.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicLotsOfClasses.java Fri Oct 11 12:08:01 2019 +0530
@@ -45,22 +45,16 @@
public class DynamicLotsOfClasses extends DynamicArchiveTestBase {
- public static void main(String[] args) throws Throwable {
+ public static void main(String[] args) throws Exception {
runTest(DynamicLotsOfClasses::testDefaultBase);
}
static void testDefaultBase() throws Exception {
String topArchiveName = getNewArchiveName("top");
- try {
- doTest(topArchiveName);
- } catch (Throwable th) {
- System.out.println(th.toString());
- Exception ex = new Exception(th);
- throw ex;
- }
+ doTest(topArchiveName);
}
- private static void doTest(String topArchiveName) throws Throwable {
+ private static void doTest(String topArchiveName) throws Exception {
ArrayList<String> list = new ArrayList<>();
TestCommon.findAllClasses(list);
--- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/HelloDynamic.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/HelloDynamic.java Fri Oct 11 12:08:01 2019 +0530
@@ -52,6 +52,9 @@
doTest(baseArchiveName, topArchiveName);
}
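+    // Legacy JDWP agent option: opens a debug server socket (server=y) without
+    // suspending the VM (suspend=n); used below to sanity-check that the shared
+    // archives still load with a debugger transport attached.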
+ private static final String JDWP_OPTION =
+ "-Xrunjdwp:transport=dt_socket,server=y,suspend=n";
+
private static void doTest(String baseArchiveName, String topArchiveName) throws Exception {
String appJar = ClassFileInstaller.getJarPath("hello.jar");
String mainClass = "Hello";
@@ -71,5 +74,19 @@
output.shouldContain("Hello source: shared objects file")
.shouldHaveExitValue(0);
});
+
+ // Sanity test with JDWP options.
+ // Test with the default base archive should be sufficient.
+ if (baseArchiveName == null) {
+ run2(baseArchiveName, topArchiveName,
+ JDWP_OPTION,
+ "-Xlog:class+load",
+ "-Xlog:cds+dynamic=debug,cds=debug",
+ "-cp", appJar, mainClass)
+ .assertNormalExit(output -> {
+ output.shouldContain("Hello source: shared objects file")
+ .shouldHaveExitValue(0);
+ });
+ }
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo/GetOwnedMonitorInfoWithEATest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8230677
+ * @summary Test JVMTI's GetOwnedMonitorInfo with scalar replaced objects and eliminated locks on stack (optimizations based on escape analysis).
+ * @comment Without RFE 8227745, escape analysis needs to be switched off for the test to pass. With RFE 8227745 implemented, it serves as a regression test.
+ * @requires (vm.compMode != "Xcomp" & vm.compiler2.enabled)
+ * @library /test/lib
+ * @compile GetOwnedMonitorInfoWithEATest.java
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:-EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking -XX:-UseOptoBiasInlining
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorInfoWithEATest
+ */
+
+import jdk.test.lib.Asserts;
+
+public class GetOwnedMonitorInfoWithEATest {
+
+ public static final int COMPILE_THRESHOLD = 20000;
+
+ /**
+     * Native wrapper around JVMTI's GetOwnedMonitorInfo().
+     * @param t The thread for which the owned monitor information should be retrieved.
+     * @param ownedMonitors Array filled in by the call with the objects associated
+     *        with the monitors owned by the given thread.
+ * @return Number of monitors owned by the given thread.
+ */
+ public static native int getOwnedMonitorInfo(Thread t, Object[] ownedMonitors);
+
+ public static void main(String[] args) throws Exception {
+ new GetOwnedMonitorInfoWithEATest().runTest();
+ }
+
+ public void runTest() throws Exception {
+ new TestCase_1().run();
+ new TestCase_2().run();
+ }
+
+ public static abstract class TestCaseBase implements Runnable {
+
+ public long checkSum;
+ public boolean doLoop;
+ public volatile long loopCount;
+ public volatile boolean targetIsInLoop;
+
+ public void run() {
+ try {
+ msgHL("Executing test case " + getClass().getName());
+ warmUp();
+ runTest();
+ } catch (Exception e) {
+ Asserts.fail("Unexpected Exception", e);
+ }
+ }
+
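+        // Call the test method often enough to get it C2-compiled. The @run options
+        // (-Xbatch, -XX:-TieredCompilation, -XX:CICompilerCount=1) keep compilation
+        // deterministic, so COMPILE_THRESHOLD + 1000 calls are expected to suffice.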
+ public void warmUp() {
+ int callCount = COMPILE_THRESHOLD + 1000;
+ doLoop = true;
+ while (callCount-- > 0) {
+ dontinline_testMethod();
+ }
+ }
+
+ public abstract void runTest() throws Exception;
+ public abstract void dontinline_testMethod();
+
+ public long dontinline_endlessLoop() {
+ long cs = checkSum;
+ while (doLoop && loopCount-- > 0) {
+ targetIsInLoop = true;
+ checkSum += checkSum % ++cs;
+ }
+ loopCount = 3;
+ targetIsInLoop = false;
+ return checkSum;
+ }
+
+ public void waitUntilTargetThreadHasEnteredEndlessLoop() throws Exception {
+ while(!targetIsInLoop) {
+ msg("Target has not yet entered the loop. Sleep 200ms.");
+ try { Thread.sleep(200); } catch (InterruptedException e) { /*ignore */ }
+ }
+ msg("Target has entered the loop.");
+ }
+
+ public void terminateEndlessLoop() throws Exception {
+ msg("Terminate endless loop");
+ do {
+ doLoop = false;
+ } while(targetIsInLoop);
+ }
+
+ public void msg(String m) {
+ System.out.println();
+ System.out.println("### " + m);
+ System.out.println();
+ }
+
+ public void msgHL(String m) {
+ System.out.println();
+ System.out.println("#####################################################");
+ System.out.println("### " + m);
+ System.out.println("###");
+ System.out.println();
+ }
+ }
+
+ /**
+ * Starts target thread T and then queries monitor information for T using JVMTI's GetOwnedMonitorInfo().
+     * With escape analysis enabled, the JIT-compiled method {@link #dontinline_testMethod()} has
+     * scalar-replaced objects with eliminated (nested) locking in scope when the monitor
+     * information is retrieved. Effectively, the objects escape through the JVMTI call. This works
+     * only with RFE 8227745; without it, escape analysis needs to be disabled.
+ */
+ public static class TestCase_1 extends TestCaseBase {
+
+ public void runTest() throws Exception {
+ loopCount = 1L << 62; // endless loop
+ Thread t1 = new Thread(() -> dontinline_testMethod(), "Target Thread");
+ t1.start();
+ try {
+ waitUntilTargetThreadHasEnteredEndlessLoop();
+ int expectedMonitorCount = 1;
+ int resultSize = expectedMonitorCount + 3;
+ Object[] ownedMonitors = new Object[resultSize];
+ msg("Get monitor info");
+ int monitorCount = getOwnedMonitorInfo(t1, ownedMonitors);
+ terminateEndlessLoop();
+ t1.join();
+ Asserts.assertGreaterThanOrEqual(monitorCount, 0, "getOwnedMonitorsFor() call failed");
+ msg("Monitor info:");
+ for (int i = 0; i < monitorCount; i++) {
+ System.out.println(i + ": cls=" + (ownedMonitors[i] != null ? ownedMonitors[i].getClass() : null));
+ }
+ Asserts.assertEQ(monitorCount, expectedMonitorCount, "unexpected monitor count returned by getOwnedMonitorsFor()");
+ Asserts.assertNotNull(ownedMonitors[0]);
+ Asserts.assertSame(ownedMonitors[0].getClass(), LockCls.class);
+ } finally {
+ terminateEndlessLoop();
+ t1.join();
+ }
+ }
+
+ public void dontinline_testMethod() {
+ LockCls l1 = new LockCls(); // to be scalar replaced
+ synchronized (l1) {
+ inlinedTestMethodWithNestedLocking(l1);
+ }
+ }
+
+ public void inlinedTestMethodWithNestedLocking(LockCls l1) {
+ synchronized (l1) { // nested
+ dontinline_endlessLoop();
+ }
+ }
+ }
+
+ /**
+     * Similar to {@link TestCase_1}. Additionally, the target thread T has eliminated locking
+     * for a synchronized method of a different type, {@linkplain LockCls2}.
+ */
+ public static class TestCase_2 extends TestCaseBase {
+
+ public void runTest() throws Exception {
+ loopCount = 1L << 62; // endless loop
+ Thread t1 = new Thread(() -> dontinline_testMethod(), "Target Thread");
+ t1.start();
+ try {
+ waitUntilTargetThreadHasEnteredEndlessLoop();
+ int expectedMonitorCount = 2;
+ int resultSize = expectedMonitorCount + 3;
+ Object[] ownedMonitors = new Object[resultSize];
+ msg("Get monitor info");
+ int monitorCount = getOwnedMonitorInfo(t1, ownedMonitors);
+ terminateEndlessLoop();
+ t1.join();
+ Asserts.assertGreaterThanOrEqual(monitorCount, 0, "getOwnedMonitorsFor() call failed");
+ msg("Monitor info:");
+ for (int i = 0; i < monitorCount; i++) {
+ System.out.println(i + ": cls=" + (ownedMonitors[i] != null ? ownedMonitors[i].getClass() : null));
+ }
+ Asserts.assertEQ(monitorCount, expectedMonitorCount, "unexpected monitor count returned by getOwnedMonitorsFor()");
+ Asserts.assertNotNull(ownedMonitors[0]);
+ Asserts.assertSame(ownedMonitors[0].getClass(), LockCls2.class);
+
+ Asserts.assertNotNull(ownedMonitors[1]);
+ Asserts.assertSame(ownedMonitors[1].getClass(), LockCls.class);
+ } finally {
+ terminateEndlessLoop();
+ t1.join();
+ }
+ }
+
+ public void dontinline_testMethod() {
+ LockCls l1 = new LockCls();
+ synchronized (l1) {
+ inlinedTestMethodWithNestedLocking(l1);
+ }
+ }
+
+ public void inlinedTestMethodWithNestedLocking(LockCls l1) {
+ synchronized (l1) {
+ dontinline_testMethod2();
+ }
+ }
+
+ public void dontinline_testMethod2() {
+ // Call synchronized method. Receiver of the call will be scalar replaced,
+ // and locking will be eliminated. Here we use a different type.
+ new LockCls2().inline_synchronized_testMethod(this);
+ }
+ }
+
+ public static class LockCls {
+ }
+
+ public static class LockCls2 {
+ public synchronized void inline_synchronized_testMethod(TestCaseBase testCase) {
+ testCase.dontinline_endlessLoop();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo/libGetOwnedMonitorInfoWithEATest.c Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+#include "jni.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
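+
+/* JNI_ENV_ARG and JNI_ENV_PTR paper over the difference between the C and C++
+ * JNI calling conventions: in C the env pointer is dereferenced explicitly and
+ * passed as the first argument, in C++ it is the implicit receiver. */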
+
+#define FAILED -1
+
+static jvmtiEnv *jvmti;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+static void ShowErrorMessage(jvmtiEnv *jvmti, jvmtiError errCode, const char *message) {
+ char *errMsg;
+ jvmtiError result;
+
+ result = (*jvmti)->GetErrorName(jvmti, errCode, &errMsg);
+ if (result == JVMTI_ERROR_NONE) {
+ fprintf(stderr, "%s: %s (%d)\n", message, errMsg, errCode);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)errMsg);
+ } else {
+ fprintf(stderr, "%s (%d)\n", message, errCode);
+ }
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT jint JNICALL
+JNI_OnLoad(JavaVM *jvm, void *reserved) {
+ jint res;
+ JNIEnv *env;
+
+ res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &env),
+ JNI_VERSION_9);
+ if (res != JNI_OK || env == NULL) {
+ fprintf(stderr, "Error: GetEnv call failed(%d)!\n", res);
+ return JNI_ERR;
+ }
+
+ return JNI_VERSION_9;
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+ jint res;
+ jvmtiError err;
+ jvmtiCapabilities caps;
+
+ printf("Agent_OnLoad started\n");
+
+ memset(&caps, 0, sizeof(caps));
+
+ res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+ JVMTI_VERSION_9);
+ if (res != JNI_OK || jvmti == NULL) {
+ fprintf(stderr, "Error: wrong result of a valid call to GetEnv!\n");
+ return JNI_ERR;
+ }
+
+ caps.can_get_owned_monitor_info = 1;
+
+ err = (*jvmti)->AddCapabilities(jvmti, &caps);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "Agent_OnLoad: error in JVMTI AddCapabilities");
+ return JNI_ERR;
+ }
+
+ err = (*jvmti)->GetCapabilities(jvmti, &caps);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "Agent_OnLoad: error in JVMTI GetCapabilities");
+ return JNI_ERR;
+ }
+
+ if (!caps.can_get_owned_monitor_info) {
+ fprintf(stderr, "Warning: GetOwnedMonitorInfo is not implemented\n");
+ return JNI_ERR;
+ }
+
+ printf("Agent_OnLoad finished\n");
+ return JNI_OK;
+}
+
+JNIEXPORT jint JNICALL
+Java_GetOwnedMonitorInfoWithEATest_getOwnedMonitorInfo(JNIEnv *env, jclass cls, jobject targetThread, jobjectArray resOwnedMonitors) {
+ jvmtiError err;
+ jvmtiThreadInfo threadInfo;
+ jint monitorCount;
+ jobject* monitors;
+ jint idx;
+
+ err = (*jvmti)->GetThreadInfo(jvmti, targetThread, &threadInfo);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "getOwnedMonitorsFor: error in JVMTI GetThreadInfo");
+ return FAILED;
+ }
+
+ err = (*jvmti)->GetOwnedMonitorInfo(jvmti, targetThread, &monitorCount, &monitors);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "getOwnedMonitorsFor: error in JVMTI GetOwnedMonitorInfo");
+ return FAILED;
+ }
+
+ printf("getOwnedMonitorsFor: %s owns %d monitor(s)\n", threadInfo.name, monitorCount);
+
+ for (idx = 0; idx < monitorCount; idx++) {
+ (*env)->SetObjectArrayElement(env, resOwnedMonitors, idx, monitors[idx]);
+ }
+
+ (*jvmti)->Deallocate(jvmti, (unsigned char *) monitors);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *) threadInfo.name);
+ return monitorCount;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo/GetOwnedMonitorStackDepthInfoWithEATest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8230677
+ * @summary Test JVMTI's GetOwnedMonitorStackDepthInfo with scalar replaced objects and eliminated locks on stack (optimizations based on escape analysis).
+ * @comment Without RFE 8227745, escape analysis needs to be switched off for the test to pass. With RFE 8227745 implemented, it serves as a regression test.
+ * @requires (vm.compMode != "Xcomp" & vm.compiler2.enabled)
+ * @library /test/lib
+ * @compile GetOwnedMonitorStackDepthInfoWithEATest.java
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:-EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking -XX:-UseOptoBiasInlining
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:+UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:+DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ * @run main/othervm/native
+ * -agentlib:GetOwnedMonitorStackDepthInfoWithEATest
+ * -XX:+UnlockDiagnosticVMOptions
+ * -Xms128m -Xmx128m
+ * -XX:CompileCommand=dontinline,*::dontinline_*
+ * -XX:+PrintCompilation
+ * -XX:+PrintInlining
+ * -XX:-TieredCompilation
+ * -Xbatch
+ * -XX:CICompilerCount=1
+ * -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks -XX:-UseBiasedLocking
+ * GetOwnedMonitorStackDepthInfoWithEATest
+ */
+
+import jdk.test.lib.Asserts;
+
+public class GetOwnedMonitorStackDepthInfoWithEATest {
+
+ public static final int COMPILE_THRESHOLD = 20000;
+
+ /**
+     * Native wrapper around JVMTI's GetOwnedMonitorStackDepthInfo().
+     * @param t The thread for which the owned monitor information should be retrieved.
+     * @param ownedMonitors Array filled in by the call with the objects associated
+     *        with the monitors owned by the given thread.
+     * @param depths Per owned monitor, the depth of the frame where it was locked.
+     *        Filled in by the call.
+ * @return Number of monitors owned by the given thread.
+ */
+ public static native int getOwnedMonitorStackDepthInfo(Thread t, Object[] ownedMonitors, int[] depths);
+
+ public static void main(String[] args) throws Exception {
+ new GetOwnedMonitorStackDepthInfoWithEATest().runTest();
+ }
+
+ public void runTest() throws Exception {
+ new TestCase_1().run();
+ new TestCase_2().run();
+ }
+
+ public static abstract class TestCaseBase implements Runnable {
+
+ public long checkSum;
+ public boolean doLoop;
+ public volatile long loopCount;
+ public volatile boolean targetIsInLoop;
+
+ public void run() {
+ try {
+ msgHL("Executing test case " + getClass().getName());
+ warmUp();
+ runTest();
+ } catch (Exception e) {
+ Asserts.fail("Unexpected Exception", e);
+ }
+ }
+
+ public void warmUp() {
+ int callCount = COMPILE_THRESHOLD + 1000;
+ doLoop = true;
+ while (callCount-- > 0) {
+ dontinline_testMethod();
+ }
+ }
+
+ public abstract void runTest() throws Exception;
+ public abstract void dontinline_testMethod();
+
+ public long dontinline_endlessLoop() {
+ long cs = checkSum;
+ while (doLoop && loopCount-- > 0) {
+ targetIsInLoop = true;
+ checkSum += checkSum % ++cs;
+ }
+ loopCount = 3;
+ targetIsInLoop = false;
+ return checkSum;
+ }
+
+ public void waitUntilTargetThreadHasEnteredEndlessLoop() throws Exception {
+ while(!targetIsInLoop) {
+ msg("Target has not yet entered the loop. Sleep 200ms.");
+ try { Thread.sleep(200); } catch (InterruptedException e) { /*ignore */ }
+ }
+ msg("Target has entered the loop.");
+ }
+
+ public void terminateEndlessLoop() throws Exception {
+ msg("Terminate endless loop");
+ do {
+ doLoop = false;
+ } while(targetIsInLoop);
+ }
+
+ public void msg(String m) {
+ System.out.println();
+ System.out.println("### " + m);
+ System.out.println();
+ }
+
+ public void msgHL(String m) {
+ System.out.println();
+ System.out.println("#####################################################");
+ System.out.println("### " + m);
+ System.out.println("###");
+ System.out.println();
+ }
+ }
+
+ /**
+ * Starts target thread T and then queries monitor information for T using JVMTI's GetOwnedMonitorStackDepthInfo().
+     * With escape analysis enabled, the JIT-compiled method {@link #dontinline_testMethod()} has
+     * scalar-replaced objects with eliminated (nested) locking in scope when the monitor
+     * information is retrieved. Effectively, the objects escape through the JVMTI call. This works
+     * only with RFE 8227745; without it, escape analysis needs to be disabled.
+ */
+ public static class TestCase_1 extends TestCaseBase {
+
+ public void runTest() throws Exception {
+ loopCount = 1L << 62; // endless loop
+ Thread t1 = new Thread(() -> dontinline_testMethod(), "Target Thread");
+ try {
+ t1.start();
+ waitUntilTargetThreadHasEnteredEndlessLoop();
+ int expectedMonitorCount = 1;
+ int resultSize = expectedMonitorCount + 3;
+ Object[] ownedMonitors = new Object[resultSize];
+ int[] depths = new int[resultSize];
+ msg("Get monitor info");
+ int monitorCount = getOwnedMonitorStackDepthInfo(t1, ownedMonitors, depths);
+ Asserts.assertGreaterThanOrEqual(monitorCount, 0, "getOwnedMonitorsFor() call failed");
+ msg("Monitor info:");
+ for (int i = 0; i < monitorCount; i++) {
+ System.out.println(i + ": cls=" + (ownedMonitors[i] != null ? ownedMonitors[i].getClass() : null) + " depth=" + depths[i]);
+ }
+ Asserts.assertEQ(monitorCount, expectedMonitorCount, "unexpected monitor count returned by getOwnedMonitorsFor()");
+ Asserts.assertNotNull(ownedMonitors[0]);
+ Asserts.assertSame(ownedMonitors[0].getClass(), LockCls.class);
+ Asserts.assertEQ(depths[0], 1, "unexpected depth for owned monitor at index 0");
+ } finally {
+ terminateEndlessLoop();
+ t1.join();
+ }
+ }
+
+ public void dontinline_testMethod() {
+ LockCls l1 = new LockCls(); // to be scalar replaced
+ synchronized (l1) {
+ inlinedTestMethodWithNestedLocking(l1);
+ }
+ }
+
+ public void inlinedTestMethodWithNestedLocking(LockCls l1) {
+ synchronized (l1) { // nested
+ dontinline_endlessLoop();
+ }
+ }
+ }
+
+ /**
+     * Similar to {@link TestCase_1}. Additionally, the target thread T has eliminated locking
+     * for a synchronized method of a different type, {@linkplain LockCls2}.
+ */
+ public static class TestCase_2 extends TestCaseBase {
+
+ public void runTest() throws Exception {
+ loopCount = 1L << 62; // endless loop
+ Thread t1 = new Thread(() -> dontinline_testMethod(), "Target Thread");
+ t1.start();
+ try {
+ waitUntilTargetThreadHasEnteredEndlessLoop();
+ int expectedMonitorCount = 2;
+ int resultSize = expectedMonitorCount + 3;
+ Object[] ownedMonitors = new Object[resultSize];
+ int[] depths = new int[resultSize];
+ msg("Get monitor info");
+ int monitorCount = getOwnedMonitorStackDepthInfo(t1, ownedMonitors, depths);
+ terminateEndlessLoop();
+ t1.join();
+ Asserts.assertGreaterThanOrEqual(monitorCount, 0, "getOwnedMonitorsFor() call failed");
+ msg("Monitor info:");
+ for (int i = 0; i < monitorCount; i++) {
+ System.out.println(i + ": cls=" + (ownedMonitors[i] != null ? ownedMonitors[i].getClass() : null) + " depth=" + depths[i]);
+ }
+ Asserts.assertEQ(monitorCount, expectedMonitorCount, "unexpected monitor count returned by getOwnedMonitorsFor()");
+ Asserts.assertNotNull(ownedMonitors[0]);
+ Asserts.assertSame(ownedMonitors[0].getClass(), LockCls2.class);
+ Asserts.assertEQ(depths[0], 1, "unexpected depth for owned monitor at index 0");
+
+ Asserts.assertNotNull(ownedMonitors[1]);
+ Asserts.assertSame(ownedMonitors[1].getClass(), LockCls.class);
+ Asserts.assertEQ(depths[1], 3, "unexpected depth for owned monitor at index 1");
+ } finally {
+ terminateEndlessLoop();
+ t1.join();
+ }
+ }
+
+ public void dontinline_testMethod() {
+ LockCls l1 = new LockCls();
+ synchronized (l1) {
+ inlinedTestMethodWithNestedLocking(l1);
+ }
+ }
+
+ public void inlinedTestMethodWithNestedLocking(LockCls l1) {
+ synchronized (l1) {
+ dontinline_testMethod2();
+ }
+ }
+
+ public void dontinline_testMethod2() {
+ // Call synchronized method. Receiver of the call will be scalar replaced,
+ // and locking will be eliminated. Here we use a different type.
+ new LockCls2().inline_synchronized_testMethod(this);
+ }
+ }
+
+ public static class LockCls {
+ }
+
+ public static class LockCls2 {
+ public synchronized void inline_synchronized_testMethod(TestCaseBase testCase) {
+ testCase.dontinline_endlessLoop();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo/libGetOwnedMonitorStackDepthInfoWithEATest.c Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+#include "jni.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define FAILED -1
+
+static jvmtiEnv *jvmti;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+static void ShowErrorMessage(jvmtiEnv *jvmti, jvmtiError errCode, const char *message) {
+ char *errMsg;
+ jvmtiError result;
+
+ result = (*jvmti)->GetErrorName(jvmti, errCode, &errMsg);
+ if (result == JVMTI_ERROR_NONE) {
+ fprintf(stderr, "%s: %s (%d)\n", message, errMsg, errCode);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)errMsg);
+ } else {
+ fprintf(stderr, "%s (%d)\n", message, errCode);
+ }
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT jint JNICALL
+JNI_OnLoad(JavaVM *jvm, void *reserved) {
+ jint res;
+ JNIEnv *env;
+
+ res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &env),
+ JNI_VERSION_9);
+ if (res != JNI_OK || env == NULL) {
+ fprintf(stderr, "Error: GetEnv call failed(%d)!\n", res);
+ return JNI_ERR;
+ }
+
+ return JNI_VERSION_9;
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+ jint res;
+ jvmtiError err;
+ jvmtiCapabilities caps;
+
+ printf("Agent_OnLoad started\n");
+
+ memset(&caps, 0, sizeof(caps));
+
+ res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+ JVMTI_VERSION_9);
+ if (res != JNI_OK || jvmti == NULL) {
+ fprintf(stderr, "Error: wrong result of a valid call to GetEnv!\n");
+ return JNI_ERR;
+ }
+
+ caps.can_get_owned_monitor_stack_depth_info = 1;
+
+ err = (*jvmti)->AddCapabilities(jvmti, &caps);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "Agent_OnLoad: error in JVMTI AddCapabilities");
+ return JNI_ERR;
+ }
+
+ err = (*jvmti)->GetCapabilities(jvmti, &caps);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "Agent_OnLoad: error in JVMTI GetCapabilities");
+ return JNI_ERR;
+ }
+
+ if (!caps.can_get_owned_monitor_stack_depth_info) {
+ fprintf(stderr, "Warning: GetOwnedMonitorStackDepthInfo is not implemented\n");
+ return JNI_ERR;
+ }
+
+ printf("Agent_OnLoad finished\n");
+ return JNI_OK;
+}
+
+JNIEXPORT jint JNICALL
+Java_GetOwnedMonitorStackDepthInfoWithEATest_getOwnedMonitorStackDepthInfo(JNIEnv *env, jclass cls, jobject targetThread, jobjectArray ownedMonitors, jintArray depths) {
+ jvmtiError err;
+ jvmtiThreadInfo threadInfo;
+ jint monitorCount;
+ jvmtiMonitorStackDepthInfo* stackDepthInfo;
+ jint* depthsPtr;
+ jint idx = 0;
+
+ err = (*jvmti)->GetThreadInfo(jvmti, targetThread, &threadInfo);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "getOwnedMonitorsFor: error in JVMTI GetThreadInfo");
+ return FAILED;
+ }
+
+ err = (*jvmti)->GetOwnedMonitorStackDepthInfo(jvmti, targetThread, &monitorCount, &stackDepthInfo);
+ if (err != JVMTI_ERROR_NONE) {
+ ShowErrorMessage(jvmti, err,
+ "getOwnedMonitorsFor: error in JVMTI GetOwnedMonitorStackDepthInfo");
+ return FAILED;
+ }
+
+ printf("getOwnedMonitorsFor: %s owns %d monitor(s)\n", threadInfo.name, monitorCount);
+
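+    /* Copy the recorded stack depths into the caller's int[]. Releasing with
+     * mode 0 writes the elements back (if a copy was made) and frees the buffer. */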
+ depthsPtr = (*env)->GetIntArrayElements(env, depths, NULL);
+ for (idx = 0; idx < monitorCount; idx++) {
+ (*env)->SetObjectArrayElement(env, ownedMonitors, idx, stackDepthInfo[idx].monitor);
+ depthsPtr[idx] = stackDepthInfo[idx].stack_depth;
+ }
+ (*env)->ReleaseIntArrayElements(env, depths, depthsPtr, 0);
+
+ (*jvmti)->Deallocate(jvmti, (unsigned char *) stackDepthInfo);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *) threadInfo.name);
+ return monitorCount;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- a/test/hotspot/jtreg/serviceability/jvmti/RedefineClasses/RedefineDeleteJmethod.java Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8181171
- * @summary Test deleting static method pointing to by a jmethod
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * @modules java.compiler
- * java.instrument
- * jdk.jartool/sun.tools.jar
- * @run main RedefineClassHelper
- * @run main/native/othervm -javaagent:redefineagent.jar -XX:+AllowRedefinitionToAddDeleteMethods -Xlog:redefine+class*=trace RedefineDeleteJmethod
- */
-
-class B {
- private static int deleteMe() { System.out.println("deleteMe called"); return 5; }
- public static int callDeleteMe() { return deleteMe(); }
-}
-
-public class RedefineDeleteJmethod {
-
- public static String newB =
- "class B {" +
- "public static int callDeleteMe() { return 6; }" +
- "}";
-
- public static String newerB =
- "class B {" +
- "private static int deleteMe() { System.out.println(\"deleteMe (2) called\"); return 7; }" +
- "public static int callDeleteMe() { return deleteMe(); }" +
- "}";
-
-
- static {
- System.loadLibrary("RedefineDeleteJmethod");
- }
-
- static native int jniCallDeleteMe();
-
- static void test(int expected, boolean nsme_expected) throws Exception {
- // Call through static method
- int res = B.callDeleteMe();
- System.out.println("Result = " + res);
- if (res != expected) {
- throw new Error("returned " + res + " expected " + expected);
- }
-
- // Call through jmethodID, saved from first call.
- try {
- res = jniCallDeleteMe();
- if (nsme_expected) {
- throw new RuntimeException("Failed, NoSuchMethodError expected");
- }
- if (res != expected) {
- throw new Error("returned " + res + " expected " + expected);
- }
- } catch (NoSuchMethodError ex) {
- if (!nsme_expected) {
- throw new RuntimeException("Failed, NoSuchMethodError not expected");
- }
- System.out.println("Passed, NoSuchMethodError expected");
- }
- }
-
- public static void main(String[] args) throws Exception {
- test(5, false);
- RedefineClassHelper.redefineClass(B.class, newB);
- test(6, true);
- RedefineClassHelper.redefineClass(B.class, newerB);
- test(7, true);
- }
-}
--- a/test/hotspot/jtreg/serviceability/jvmti/RedefineClasses/libRedefineDeleteJmethod.c Wed Oct 09 17:06:06 2019 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include <jni.h>
-
-jmethodID mid;
-jclass cls;
-static int count = 0;
-
-JNIEXPORT jint JNICALL
-Java_RedefineDeleteJmethod_jniCallDeleteMe(JNIEnv* env, jobject obj) {
-
- if (count == 0) {
- count++;
- cls = (*env)->FindClass(env, "B");
- if (NULL == cls) {
- (*env)->FatalError(env, "could not find class");
- }
-
- mid = (*env)->GetStaticMethodID(env, cls, "deleteMe", "()I");
- if (NULL == mid) {
- (*env)->FatalError(env, "could not find method");
- }
- }
-
- return (*env)->CallStaticIntMethod(env, cls, mid);
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/SuspendWithCurrentThread/SuspendWithCurrentThread.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231595
+ * @summary [TEST] develop a test case for SuspendThreadList including current thread
+ * @library /test/lib
+ * @compile SuspendWithCurrentThread.java
+ * @run main/othervm/native -agentlib:SuspendWithCurrentThread SuspendWithCurrentThread SuspenderIndex=first
+ * @run main/othervm/native -agentlib:SuspendWithCurrentThread SuspendWithCurrentThread SuspenderIndex=last
+ */
+
+import java.io.PrintStream;
+
+public class SuspendWithCurrentThread {
+ private static final String AGENT_LIB = "SuspendWithCurrentThread";
+ private static final String SUSPENDER_OPT = "SuspenderIndex=";
+ private static final int THREADS_COUNT = 10;
+
+ private static void log(String msg) { System.out.println(msg); }
+
+ private static native void registerTestedThreads(Thread[] threads);
+ private static native boolean checkTestedThreadsSuspended();
+ private static native void resumeTestedThreads();
+ private static native void releaseTestedThreadsInfo();
+
+ // The suspender thread index defines the thread which has to suspend
+ // the tested threads including itself with the JVMTI SuspendThreadList
+ private static int suspenderIndex;
+
+ public static void main(String args[]) throws Exception {
+ try {
+ System.loadLibrary(AGENT_LIB);
+ log("Loaded library: " + AGENT_LIB);
+ } catch (UnsatisfiedLinkError ule) {
+ log("Failed to load library: " + AGENT_LIB);
+ log("java.library.path: " + System.getProperty("java.library.path"));
+ throw ule;
+ }
+ if (args.length != 1) {
+ throw new RuntimeException("Main: wrong arguments count: " + args.length + ", expected: 1");
+ }
+ String arg = args[0];
+ if (arg.equals(SUSPENDER_OPT + "first")) {
+ suspenderIndex = 0;
+ } else if (arg.equals(SUSPENDER_OPT + "last")) {
+ suspenderIndex = THREADS_COUNT - 1;
+ } else {
+ throw new RuntimeException("Main: wrong argument: " + arg + ", expected: SuspenderIndex={first|last}");
+ }
+ log("Main: suspenderIndex: " + suspenderIndex);
+
+ SuspendWithCurrentThread test = new SuspendWithCurrentThread();
+ test.run();
+ }
+
+ private ThreadToSuspend[] startTestedThreads(int threadsCount) throws RuntimeException {
+ ThreadToSuspend[] threads = new ThreadToSuspend[threadsCount];
+
+ // create tested threads
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new ThreadToSuspend("ThreadToSuspend#" + i,
+ i == suspenderIndex // isSuspender
+ );
+ }
+ log("Main: starting tested threads");
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ if (!threads[i].checkReady()) {
+ throw new RuntimeException("Main: unable to prepare tested thread: " + threads[i]);
+ }
+ }
+ log("Main: tested threads started");
+
+ registerTestedThreads(threads);
+ return threads;
+ }
+
+ private boolean checkSuspendedStatus() throws RuntimeException {
+ log("Main: checking all tested threads have been suspended");
+ return checkTestedThreadsSuspended();
+ }
+
+ /* The test does the following steps:
+ * - main thread starts several (THREADS_COUNT) ThreadToSuspend tested threads
+ * - main thread waits for threads to be ready with the thread.checkReady()
+ * - main thread registers tested threads within the native agent library
+ * with the native method registerTestedThreads()
+ * - main thread triggers the suspender tested thread with the
+ * ThreadToSuspend.setAllThreadsReady() to suspend tested threads
+ * - suspender thread suspends tested threads including itself with the native
+ * method suspendTestedThreads() (uses the JVMTI SuspendThreadList function)
+ * - main thread checks tested threads suspended status with the native method
+ * checkSuspendedStatus(); the tested threads are expected to have suspended status
+ * - main thread resumes tested threads with the native method resumeTestedThreads()
+ * - main thread releases the tested threads native info with the native method releaseTestedThreadsInfo()
+ * - main thread triggers the tested threads to finish with the thread.letFinish()
+ */
+ private void run() throws Exception {
+ ThreadToSuspend[] threads = null; // tested threads
+
+ log("Main: started");
+ try {
+ threads = startTestedThreads(THREADS_COUNT);
+
+ log("Main: trigger " + threads[suspenderIndex].getName() +
+ " to suspend all tested threads including itself");
+ ThreadToSuspend.setAllThreadsReady();
+
+ if (!checkSuspendedStatus()) {
+ throw new RuntimeException("Main: FAILED status returned from checkTestedThreadsSuspended");
+ }
+
+ log("Main: resuming all tested threads");
+ resumeTestedThreads();
+ } finally {
+ // let the threads finish
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].letFinish();
+ }
+ log("Main: tested threads finished");
+ }
+
+ // wait for threads to finish
+ log("Main: joining tested threads");
+ try {
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ log("Main: tested thread joined");
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ log("Main: releasing tested threads native info");
+ releaseTestedThreadsInfo();
+
+ log("Main: finished");
+ }
+}
+
+/* =================================================================== */
+
+// tested threads
+class ThreadToSuspend extends Thread {
+ private static void log(String msg) { System.out.println(msg); }
+
+ private static native void init();
+ private static native void suspendTestedThreads();
+ private static volatile boolean allThreadsReady = false;
+
+ public static void setAllThreadsReady() {
+ allThreadsReady = true;
+ }
+
+ private volatile boolean threadReady = false;
+ private volatile boolean shouldFinish = false;
+ private boolean isSuspender = false;
+
+ // make thread with specific name
+ public ThreadToSuspend(String name, boolean isSuspender) {
+ super(name);
+ this.isSuspender = isSuspender;
+ }
+
+ // run thread continuously
+ public void run() {
+ boolean needSuspend = true;
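+ // Only the suspender thread creates the agent monitor (in init()) and triggers the
+ // suspension; needSuspend ensures suspendTestedThreads() is called at most once.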
+
+ if (isSuspender) {
+ init();
+ }
+ threadReady = true;
+
+ // run in a loop
+ while (!shouldFinish) {
+ if (isSuspender && needSuspend && allThreadsReady) {
+ log(getName() + ": before suspending all tested threads including myself");
+ needSuspend = false;
+ suspendTestedThreads();
+ log(getName() + ": after suspending all tested threads including myself");
+ }
+ }
+ }
+
+ // check if thread is ready
+ public boolean checkReady() {
+ try {
+ while (!threadReady) {
+ sleep(1);
+ }
+ } catch (InterruptedException e) {
+ throw new RuntimeException("checkReady: sleep was interrupted\n\t" + e);
+ }
+ return threadReady;
+ }
+
+ // let the thread finish
+ public void letFinish() {
+ shouldFinish = true;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/SuspendWithCurrentThread/libSuspendWithCurrentThread.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <string.h>
+#include "jvmti.h"
+
+extern "C" {
+
+static jvmtiEnv* jvmti = NULL;
+static jthread* threads = NULL;
+static jsize threads_count = 0;
+static jrawMonitorID agent_monitor = NULL;
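+// The agent monitor is entered by the suspender thread in init() and exited at the end of
+// suspendTestedThreads(); the main thread briefly enters and exits it in
+// checkTestedThreadsSuspended() so it proceeds only after the suspender has released it.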
+
+#define LOG(...) \
+ do { \
+ printf(__VA_ARGS__); \
+ printf("\n"); \
+ fflush(stdout); \
+ } while (0)
+
+static void
+check_jvmti_status(JNIEnv* jni, jvmtiError err, const char* msg) {
+ if (err != JVMTI_ERROR_NONE) {
+ LOG("check_jvmti_status: JVMTI function returned error: %d", err);
+ jni->FatalError(msg);
+ }
+}
+
+static void
+agent_lock(JNIEnv* jni) {
+ jvmtiError err = jvmti->RawMonitorEnter(agent_monitor);
+ check_jvmti_status(jni, err, "monitor_enter: error in JVMTI RawMonitorEnter");
+}
+
+static void
+agent_unlock(JNIEnv* jni) {
+ jvmtiError err = jvmti->RawMonitorExit(agent_monitor);
+ check_jvmti_status(jni, err, "monitor_exit: error in JVMTI RawMonitorExit");
+}
+
+JNIEXPORT void JNICALL
+Java_SuspendWithCurrentThread_registerTestedThreads(JNIEnv *jni, jclass cls, jobjectArray threadsArr) {
+ LOG("\nregisterTestedThreads: started");
+ threads_count = jni->GetArrayLength(threadsArr);
+
+ jvmtiError err = jvmti->Allocate((threads_count * sizeof(jthread)),
+ (unsigned char**)&threads);
+ check_jvmti_status(jni, err, "registerTestedThreads: error in JVMTI Allocate threads array");
+
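+ // Pin each thread with a global reference so the jthread handles stay valid for later JVMTI calls.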
+ for (int i = 0; i < threads_count; i++) {
+ jobject elem = jni->GetObjectArrayElement(threadsArr, i);
+ threads[i] = (jthread)jni->NewGlobalRef(elem);
+ }
+ LOG("registerTestedThreads: finished\n");
+}
+
+/* This function is executed on the suspender thread, not the Main thread */
+JNIEXPORT void JNICALL
+Java_ThreadToSuspend_init(JNIEnv *jni, jclass cls) {
+ jvmtiError err = jvmti->CreateRawMonitor("Agent monitor", &agent_monitor);
+ check_jvmti_status(jni, err, "Java_ThreadToSuspend_init: error in JVMTI CreateRawMonitor");
+
+ // Main thread has to wait for the suspender thread to complete tested threads suspension
+ agent_lock(jni);
+}
+
+/* This function is executed on the suspender thread, which is not the Main thread */
+JNIEXPORT void JNICALL
+Java_ThreadToSuspend_suspendTestedThreads(JNIEnv *jni, jclass cls) {
+ jvmtiError* results = NULL;
+ jvmtiError err;
+
+ LOG("\nsuspendTestedThreads: started");
+ err = jvmti->Allocate((threads_count * sizeof(jvmtiError)),
+ (unsigned char**)&results);
+ check_jvmti_status(jni, err, "suspendTestedThreads: error in JVMTI Allocate results array");
+
+ LOG("suspendTestedThreads: before JVMTI SuspendThreadList");
+ err = jvmti->SuspendThreadList(threads_count, threads, results);
+ check_jvmti_status(jni, err, "suspendTestedThreads: error in JVMTI SuspendThreadList");
+
+ LOG("suspendTestedThreads: check and print SuspendThreadList results:");
+ for (int i = 0; i < threads_count; i++) {
+ LOG(" thread #%d: (%d)", i, (int)results[i]);
+ check_jvmti_status(jni, results[i], "suspendTestedThreads: error in SuspendThreadList results[i]");
+ }
+ LOG("suspendTestedThreads: finished\n");
+
+ // Allow the Main thread to inspect the result of tested threads suspension
+ agent_unlock(jni);
+
+ err = jvmti->Deallocate((unsigned char*)results);
+ check_jvmti_status(jni, err, "suspendTestedThreads: error in JVMTI Deallocate results");
+}
+
+JNIEXPORT jboolean JNICALL
+Java_SuspendWithCurrentThread_checkTestedThreadsSuspended(JNIEnv *jni, jclass cls) {
+ LOG("checkTestedThreadsSuspended: started");
+
+ // Block until the suspender thread completes the tested threads suspension
+ agent_lock(jni);
+ agent_unlock(jni);
+
+ for (int i = 0; i < threads_count; i++) {
+ jint state = 0;
+ jvmtiError err = jvmti->GetThreadState(threads[i], &state);
+ check_jvmti_status(jni, err, "checkTestedThreadsSuspended: error in GetThreadState");
+
+ if ((state & JVMTI_THREAD_STATE_SUSPENDED) == 0) {
+ LOG("thread #%d has not been suspended yet: "
+ "# state: (%#x)", i, (int)state);
+ jni->FatalError("checkTestedThreadsSuspended: error: expected all tested threads suspended");
+ }
+ }
+ LOG("checkTestedThreadsSuspended: finished\n");
+ return JNI_TRUE;
+}
+
+JNIEXPORT void JNICALL
+Java_SuspendWithCurrentThread_resumeTestedThreads(JNIEnv *jni, jclass cls) {
+ jvmtiError* results = NULL;
+ jvmtiError err;
+
+ LOG("\nresumeTestedThreads: started");
+ err = jvmti->Allocate((threads_count * sizeof(jvmtiError)),
+ (unsigned char**)&results);
+ check_jvmti_status(jni, err, "resumeTestedThreads: error in JVMTI Allocate results array");
+
+ LOG("resumeTestedThreads: before JVMTI ResumeThreadList");
+ err = jvmti->ResumeThreadList(threads_count, threads, results);
+ check_jvmti_status(jni, err, "resumeTestedThreads: error in ResumeThreadList");
+
+ LOG("resumeTestedThreads: check and print ResumeThreadList results:");
+ for (int i = 0; i < threads_count; i++) {
+ LOG(" thread #%d: (%d)", i, (int)results[i]);
+ check_jvmti_status(jni, results[i], "resumeTestedThreads: error in ResumeThreadList results[i]");
+ }
+
+ err = jvmti->Deallocate((unsigned char*)results);
+ check_jvmti_status(jni, err, "resumeTestedThreads: error in JVMTI Deallocate results");
+
+ LOG("resumeTestedThreads: finished\n");
+}
+
+JNIEXPORT void JNICALL
+Java_SuspendWithCurrentThread_releaseTestedThreadsInfo(JNIEnv *jni, jclass cls) {
+ jvmtiError err;
+
+ LOG("\nreleaseTestedThreadsInfo: started");
+ err = jvmti->DestroyRawMonitor(agent_monitor);
+ check_jvmti_status(jni, err, "releaseTestedThreadsInfo: error in JVMTI DestroyRawMonitor");
+
+ for (int i = 0; i < threads_count; i++) {
+ if (threads[i] != NULL) {
+ jni->DeleteGlobalRef(threads[i]);
+ }
+ }
+ err = jvmti->Deallocate((unsigned char*)threads);
+ check_jvmti_status(jni, err, "releaseTestedThreadsInfo: error in JVMTI Deallocate threads");
+
+ LOG("releaseTestedThreadsInfo: finished\n");
+}
+
+
+/** Agent library initialization. */
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+ LOG("\nAgent_OnLoad started");
+
+ // create JVMTI environment
+ if (jvm->GetEnv((void **) (&jvmti), JVMTI_VERSION) != JNI_OK) {
+ return JNI_ERR;
+ }
+
+ // add specific capabilities for suspending thread
+ jvmtiCapabilities suspendCaps;
+ memset(&suspendCaps, 0, sizeof(suspendCaps));
+ suspendCaps.can_suspend = 1;
+
+ jvmtiError err = jvmti->AddCapabilities(&suspendCaps);
+ if (err != JVMTI_ERROR_NONE) {
+ return JNI_ERR;
+ }
+ LOG("Agent_OnLoad finished\n");
+ return JNI_OK;
+}
+
+}
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSCore.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSCore.java Fri Oct 11 12:08:01 2019 +0530
@@ -34,26 +34,30 @@
* @run main/othervm/timeout=2400 -Xmx1g ClhsdbCDSCore
*/
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.HashMap;
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.Platform;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.cds.CDSTestUtils;
-import jdk.test.lib.cds.CDSOptions;
+import java.io.File;
import java.io.IOException;
-import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import jdk.test.lib.Asserts;
-import java.util.regex.Pattern;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Scanner;
import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
import jdk.internal.misc.Unsafe;
-import java.util.Scanner;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.Platform;
+import jdk.test.lib.cds.CDSOptions;
+import jdk.test.lib.cds.CDSTestUtils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.SA.SATestUtils;
+
import jtreg.SkippedException;
class CrashApp {
@@ -102,6 +106,7 @@
System.out.println(crashOut.getOutput());
String crashOutputString = crashOut.getOutput();
+ SATestUtils.unzipCores(new File("."));
String coreFileLocation = getCoreFileLocation(crashOutputString);
if (coreFileLocation == null) {
if (Platform.isOSX()) {
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbLauncher.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLauncher.java Fri Oct 11 12:08:01 2019 +0530
@@ -140,6 +140,7 @@
for (String cmd : commands) {
int index = commands.indexOf(cmd) + 1;
OutputAnalyzer out = new OutputAnalyzer(parts[index]);
+ out.shouldNotMatch("Unrecognized command.");
if (expectedStrMap != null) {
List<String> expectedStr = expectedStrMap.get(cmd);
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbPmap.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPmap.java Fri Oct 11 12:08:01 2019 +0530
@@ -52,9 +52,8 @@
List<String> cmds = List.of("pmap");
Map<String, List<String>> expStrMap = new HashMap<>();
- expStrMap.put("pmap", List.of(
- "jvm", "java", "net", "nio",
- "jimage", "zip", "verify"));
+ expStrMap.put("pmap",
+ List.of("jvm", "java", "net", "nio", "jimage", "zip"));
test.run(theApp.getPid(), cmds, expStrMap, null);
} catch (SkippedException se) {
--- a/test/hotspot/jtreg/serviceability/sa/TestJmapCore.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/TestJmapCore.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,19 +29,20 @@
* @run driver/timeout=240 TestJmapCore run heap
*/
+import java.io.File;
+
import jdk.test.lib.Asserts;
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.JDKToolLauncher;
import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
import jdk.test.lib.classloader.GeneratingClassLoader;
import jdk.test.lib.hprof.HprofParser;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.Utils;
+import jdk.test.lib.SA.SATestUtils;
import jtreg.SkippedException;
-import java.io.File;
-
public class TestJmapCore {
static final String pidSeparator = ":KILLED_PID";
@@ -97,9 +98,11 @@
? ProcessTools.executeProcess(pb)
: ProcessTools.executeProcess("sh", "-c", "ulimit -c unlimited && "
+ ProcessTools.getCommandLine(pb));
+ File pwd = new File(".");
+ SATestUtils.unzipCores(pwd);
File core;
String pattern = Platform.isWindows() ? ".*\\.mdmp" : "core(\\.\\d+)?";
- File[] cores = new File(".").listFiles((dir, name) -> name.matches(pattern));
+ File[] cores = pwd.listFiles((dir, name) -> name.matches(pattern));
if (cores.length == 0) {
// /cores/core.$pid might be generated on macosx by default
String pid = output.firstMatch("^(\\d+)" + pidSeparator, 1);
@@ -110,7 +113,7 @@
} else {
Asserts.assertTrue(cores.length == 1,
"There are unexpected files containing core "
- + ": " + String.join(",", new File(".").list()) + ".");
+ + ": " + String.join(",", pwd.list()) + ".");
core = cores[0];
}
System.out.println("Found corefile: " + core.getAbsolutePath());
--- a/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Fri Oct 11 12:08:01 2019 +0530
@@ -86,7 +86,7 @@
break;
case Shenandoah:
- expStrings.add("Shenandoah Heap");
+ expStrings.add("Shenandoah heap");
break;
}
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/HighWaterMarkTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/HighWaterMarkTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
* Test verifies that MinMetaspaceFreeRatio/MaxMetaspaceFreeRatio settings
* affect the frequency of GC. (High-water mark)
*
- * Note: The test doesn't check the GC count if CMS is used.
- *
* Quoting: Java SE 8 HotSpot[tm] Virtual Machine Garbage Collection Tuning
* <pre>
* Class metadata is deallocated when the corresponding Java class is unloaded.
@@ -175,11 +173,6 @@
throw new Fault("Committed amount hasn't achieved " + bytes2k(committedLevel));
}
- if (VMRuntimeEnvUtils.isVMOptionEnabled("UseConcMarkSweepGC")) {
- System.out.println("ConcMarkSweep is used, cannot count GC");
- return;
- }
-
int gcCount = getMetaspaceGCCount();
if (gcCount < 0) {
// perhpas, it's better to silently pass here... Let's see.
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
* @summary converted from VM Testbase metaspace/gc/watermark_0_1.
* VM Testbase keywords: [nonconcurrent, no_cds]
*
+ * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
+ * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "ConcMarkSweep"
+ * @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
* @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
* @summary converted from VM Testbase metaspace/gc/watermark_10_20.
* VM Testbase keywords: [nonconcurrent, no_cds]
*
+ * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
+ * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "ConcMarkSweep"
+ * @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
* @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
* @summary converted from VM Testbase metaspace/gc/watermark_70_80.
* VM Testbase keywords: [nonconcurrent, no_cds]
*
+ * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
+ * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "ConcMarkSweep"
+ * @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
* @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
* @summary converted from VM Testbase metaspace/gc/watermark_99_100.
* VM Testbase keywords: [nonconcurrent, no_cds]
*
+ * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
+ * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
+ * @requires vm.gc != "ConcMarkSweep"
+ * @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
* @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/Allocate/alloc001/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/Allocate/alloc001/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -42,6 +42,8 @@
* @library /vmTestbase
* /test/lib
* @requires os.family != "aix"
+ * @comment Test is incompatible with ZGC, due to ZGC's address space requirements.
+ * @requires vm.gc != "Z"
* @run driver jdk.test.lib.FileInstaller . .
* @build nsk.jvmti.Allocate.alloc001
* @run shell alloc001.sh
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/RawMonitorWait/rawmnwait005/rawmnwait005.cpp Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/RawMonitorWait/rawmnwait005/rawmnwait005.cpp Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
static jvmtiEnv *jvmti = NULL;
static jvmtiCapabilities caps;
static jint result = PASSED;
-static jboolean printdump = JNI_FALSE;
+static jboolean printdump = JNI_TRUE;
static jrawMonitorID monitor;
static jrawMonitorID wait_lock;
static jlong wait_time;
@@ -100,6 +100,8 @@
jvmtiError err;
const char* const thread_name = "test thread";
+ // Once we hold this monitor we know we can't get interrupted
+ // until we have called wait().
err = jvmti->RawMonitorEnter(monitor);
if (err != JVMTI_ERROR_NONE) {
printf("(RawMonitorEnter#test) unexpected error: %s (%d)\n",
@@ -110,6 +112,7 @@
printf(">>> [%s] acquired lock for 'monitor' ...\n", thread_name);
}
+ // We can't get this monitor until the main thread has called wait() on it.
err = jvmti->RawMonitorEnter(wait_lock);
if (err != JVMTI_ERROR_NONE) {
printf("(RawMonitorEnter#wait) unexpected error: %s (%d)\n",
@@ -156,6 +159,36 @@
result = STATUS_FAILED;
}
+ // We can't reacquire this monitor until the main thread is waiting for us to
+ // complete.
+ err = jvmti->RawMonitorEnter(wait_lock);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("(RawMonitorEnter#wait) unexpected error: %s (%d)\n",
+ TranslateError(err), err);
+ result = STATUS_FAILED;
+ return;
+ }
+
+ if (printdump == JNI_TRUE) {
+ printf(">>> [%s] acquired lock for 'wait_lock' ...\n", thread_name);
+ printf(">>> [%s] notifying main thread we are done ...\n", thread_name);
+ }
+
+ err = jvmti->RawMonitorNotify(wait_lock);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("(RawMonitorWait#wait) unexpected error: %s (%d)\n",
+ TranslateError(err), err);
+ result = STATUS_FAILED;
+ return;
+ }
+ err = jvmti->RawMonitorExit(wait_lock);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("(RawMonitorExit#wait) unexpected error: %s (%d)\n",
+ TranslateError(err), err);
+ result = STATUS_FAILED;
+ return;
+ }
+
if (printdump == JNI_TRUE) {
printf(">>> [%s] all done\n", thread_name);
}
@@ -223,6 +256,11 @@
if (printdump == JNI_TRUE) {
printf(">>> [%s] starting test thread ...\n", thread_name);
}
+
+ // This starts a daemon thread, so we need to synchronize with it
+ // before we terminate - otherwise the test will end before it checks
+ // that it was interrupted!
+
err = jvmti->RunAgentThread(thr, test_thread, NULL,
JVMTI_THREAD_NORM_PRIORITY);
if (err != JVMTI_ERROR_NONE) {
@@ -244,12 +282,7 @@
printf(">>> [%s] got notification from test thread ...\n", thread_name);
}
- err = jvmti->RawMonitorExit(wait_lock);
- if (err != JVMTI_ERROR_NONE) {
- printf("(RawMonitorExit#wait) unexpected error: %s (%d)\n",
- TranslateError(err), err);
- return STATUS_FAILED;
- }
+ // Keep the wait_lock so we can wait again at the end.
err = jvmti->RawMonitorEnter(monitor);
if (err != JVMTI_ERROR_NONE) {
@@ -280,6 +313,26 @@
}
if (printdump == JNI_TRUE) {
+ printf(">>> [%s] waiting for test thread to complete its wait and notify us ...\n", thread_name);
+ }
+ err = jvmti->RawMonitorWait(wait_lock, (jlong)0);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("(RawMonitorWait#wait) unexpected error: %s (%d)\n",
+ TranslateError(err), err);
+ return STATUS_FAILED;
+ }
+ if (printdump == JNI_TRUE) {
+ printf(">>> [%s] got final notification from test thread ...\n", thread_name);
+ }
+
+ err = jvmti->RawMonitorExit(wait_lock);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("(RawMonitorExit#wait) unexpected error: %s (%d)\n",
+ TranslateError(err), err);
+ return STATUS_FAILED;
+ }
+
+ if (printdump == JNI_TRUE) {
printf(">>> [%s] all done\n", thread_name);
}
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/ResourceExhausted/resexhausted002/TestDescription.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/ResourceExhausted/resexhausted002/TestDescription.java Fri Oct 11 12:08:01 2019 +0530
@@ -40,8 +40,8 @@
* @run driver jdk.test.lib.FileInstaller . .
* @run main/othervm/native
* -agentlib:resexhausted=-waittime=5
- * -Xms8m
- * -Xmx8m
+ * -Xms128m
+ * -Xmx128m
* -XX:-UseGCOverheadLimit
* nsk.jvmti.ResourceExhausted.resexhausted002
*/
--- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/ThreadMXBean/GetThreadAllocatedBytes/BaseBehaviorTest.README Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/ThreadMXBean/GetThreadAllocatedBytes/BaseBehaviorTest.README Fri Oct 11 12:08:01 2019 +0530
@@ -1,4 +1,4 @@
-Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,8 @@
DESCRIPTION
- Tests getThreadAllocatedBytes(long id) and getThreadAllocatedBytes(long[] ids),
+ Tests getCurrentThreadAllocatedBytes(), getThreadAllocatedBytes(long id),
+ and getThreadAllocatedBytes(long[] ids),
functions of com.sun.management.ThreadMXBean
All methods should
--- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/ThreadMXBean/GetThreadAllocatedBytes/BaseBehaviorTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/ThreadMXBean/GetThreadAllocatedBytes/BaseBehaviorTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,8 @@
import nsk.monitoring.ThreadMXBean.*;
/**
- * Tests getThreadAllocatedBytes(long id) and getThreadAllocatedBytes(long[] ids),
+ * Tests getCurrentThreadAllocatedBytes(), getThreadAllocatedBytes(long id),
+ * and getThreadAllocatedBytes(long[] ids),
* functions of com.sun.management.ThreadMXBean
* <p>
* All methods should
@@ -49,11 +50,31 @@
public void run() {
if (threadMXBean == null)
return;
+
+ // Expect -1 if thread allocated memory is disabled
+ threadMXBean.setThreadAllocatedMemoryEnabled(false);
+ long result = threadMXBean.getCurrentThreadAllocatedBytes();
+ if (result != -1)
+ throw new TestFailure("Failure! getCurrentThreadAllocatedBytes() should "
+ + "return -1 if ThreadAllocatedMemoryEnabled is set to false. "
+ + "Received : " + result);
+ threadMXBean.setThreadAllocatedMemoryEnabled(true);
+ // Expect >= 0 value for current thread
+ result = threadMXBean.getCurrentThreadAllocatedBytes();
+ if (result < 0)
+ throw new TestFailure("Failure! getCurrentThreadAllocatedBytes() should "
+ + "return >= 0 value for current thread. Received : " + result);
+ // Expect >= 0 value for current thread from getThreadAllocatedBytes(id)
+ result = threadMXBean.getThreadAllocatedBytes(Thread.currentThread().getId());
+ if (result < 0)
+ throw new TestFailure("Failure! getThreadAllocatedBytes(id) should "
+ + "return >= 0 value for current thread. Received : " + result);
+
MXBeanTestThread thread = new MXBeanTestThread();
long id = thread.getId();
long[] idArr = new long[] { id };
- long result;
long[] resultArr;
+
// Expect -1 for not started threads
result = threadMXBean.getThreadAllocatedBytes(id);
if (result != -1)
@@ -80,7 +101,7 @@
+ "Recieved : " + resultArr[0]);
threadMXBean.setThreadAllocatedMemoryEnabled(true);
- // Expect > 0 value for running threads
+ // Expect >= 0 value for running threads
result = threadMXBean.getThreadAllocatedBytes(id);
if (result < 0)
throw new TestFailure("Failure! getThreadAllocatedBytes(long id) should "
--- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/server/ServerThreadMXBeanNew.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/server/ServerThreadMXBeanNew.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,10 @@
new String[] { long.class.getName() });
}
+ public long getCurrentThreadAllocatedBytes() {
+ return getLongAttribute("CurrentThreadAllocatedBytes");
+ }
+
public void setThreadAllocatedMemoryEnabled(boolean enabled) {
setBooleanAttribute("ThreadAllocatedMemoryEnabled", enabled);
}
--- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SynchronizerLockingThreads.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SynchronizerLockingThreads.java Fri Oct 11 12:08:01 2019 +0530
@@ -243,7 +243,7 @@
protected boolean isStackTraceElementExpected(StackTraceElement element) {
return super.isStackTraceElementExpected(element) ||
checkStackTraceElement(element, expectedMethodsThread2) ||
- element.getClassName().startsWith("java.util.concurrent.locks.") ||
+ element.getClassName().startsWith("java.util.concurrent.") ||
element.getClassName().startsWith("jdk.internal.misc.");
}
}
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/Binder.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/Binder.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -201,8 +201,6 @@
debugee = localLaunchDebugee(vmm, classToExecute, classPath);
} else if (argumentHandler.isAttachingConnector()) {
debugee = localLaunchAndAttachDebugee(vmm, classToExecute, classPath);
- } else if (argumentHandler.isLaunchingConnector()) {
- debugee = localLaunchDebugee(vmm, classToExecute, classPath);
} else if (argumentHandler.isListeningConnector()) {
debugee = localLaunchAndListenDebugee(vmm, classToExecute, classPath);
} else {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jaxp/javax/xml/jaxp/unittest/transform/StAX2DOMTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package transform;
+
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLStreamConstants;
+import javax.xml.stream.XMLStreamReader;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMResult;
+import javax.xml.transform.stax.StAXSource;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+import org.w3c.dom.Node;
+
+/*
+ * @test
+ * @bug 8016914
+ * @library /javax/xml/jaxp/libs /javax/xml/jaxp/unittest
+ * @run testng transform.StAX2DOMTest
+ * @summary Verifies transforming a StAXSource to a DOMResult.
+ */
+public class StAX2DOMTest {
+ /**
+ * Data files for test.
+ * Column(s): xml file
+ *
+ * @return data for test
+ */
+ @DataProvider(name = "datafiles")
+ public Object[][] getData() {
+ return new Object[][] {
+ { "StAX2DOMTest.xml"}, //without declaration
+ { "StAX2DOMTest1.xml"}, //with declaration
+ };
+ }
+
+ /**
+ * Verifies that transforming a StAX source to a DOM result passes with
+ * or without the XML declaration.
+ *
+ * @param file the XML file
+ * @throws Exception if the test fails
+ */
+ @Test(dataProvider = "datafiles")
+ public void test(String file) throws Exception {
+ final XMLInputFactory xif = XMLInputFactory.newInstance();
+ final XMLStreamReader xsr = xif.createXMLStreamReader(
+ this.getClass().getResourceAsStream(file));
+ xsr.nextTag(); // Advance to the root element
+
+ final TransformerFactory tf = TransformerFactory.newInstance();
+ final Transformer t = tf.newTransformer();
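+ // Transform each child element of the root (each <book>) into its own DOM result.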
+ while (xsr.nextTag() == XMLStreamConstants.START_ELEMENT) {
+ final DOMResult result = new DOMResult();
+ t.transform(new StAXSource(xsr), result);
+ final Node domNode = result.getNode();
+ System.out.println(domNode);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jaxp/javax/xml/jaxp/unittest/transform/StAX2DOMTest.xml Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,119 @@
+<catalog>
+ <book id= " bk101 " >
+ <author>Gambardella, Matthew</author>
+ <title>XML Developer's Guide</title>
+ <genre>Computer</genre>
+ <price>44.95</price>
+ <publish_date>2000-10-01</publish_date>
+ <description>An in-depth look at creating applications
+ with XML.</description>
+ </book>
+ <book id= " bk102 " >
+ <author>Ralls, Kim</author>
+ <title>Midnight Rain</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2000-12-16</publish_date>
+ <description>A former architect battles corporate zombies,
+ an evil sorceress, and her own childhood to become queen
+ of the world.</description>
+ </book>
+ <book id= " bk103 " >
+ <author>Corets, Eva</author>
+ <title>Maeve Ascendant</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2000-11-17</publish_date>
+ <description>After the collapse of a nanotechnology
+ society in England, the young survivors lay the
+ foundation for a new society.</description>
+ </book>
+ <book id= " bk104 " >
+ <author>Corets, Eva</author>
+ <title>Oberon's Legacy</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2001-03-10</publish_date>
+ <description>In post-apocalypse England, the mysterious
+ agent known only as Oberon helps to create a new life
+ for the inhabitants of London. Sequel to Maeve
+ Ascendant.</description>
+ </book>
+ <book id= " bk105 " >
+ <author>Corets, Eva</author>
+ <title>The Sundered Grail</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2001-09-10</publish_date>
+ <description>The two daughters of Maeve, half-sisters,
+ battle one another for control of England. Sequel to
+ Oberon's Legacy.</description>
+ </book>
+ <book id= " bk106 " >
+ <author>Randall, Cynthia</author>
+ <title>Lover Birds</title>
+ <genre>Romance</genre>
+ <price>4.95</price>
+ <publish_date>2000-09-02</publish_date>
+ <description>When Carla meets Paul at an ornithology
+ conference, tempers fly as feathers get ruffled.</description>
+ </book>
+ <book id= " bk107 " >
+ <author>Thurman, Paula</author>
+ <title>Splish Splash</title>
+ <genre>Romance</genre>
+ <price>4.95</price>
+ <publish_date>2000-11-02</publish_date>
+ <description>A deep sea diver finds true love twenty
+ thousand leagues beneath the sea.</description>
+ </book>
+ <book id= " bk108 " >
+ <author>Knorr, Stefan</author>
+ <title>Creepy Crawlies</title>
+ <genre>Horror</genre>
+ <price>4.95</price>
+ <publish_date>2000-12-06</publish_date>
+ <description>An anthology of horror stories about roaches,
+ centipedes, scorpions and other insects.</description>
+ </book>
+ <book id= " bk109 " >
+ <author>Kress, Peter</author>
+ <title>Paradox Lost</title>
+ <genre>Science Fiction</genre>
+ <price>6.95</price>
+ <publish_date>2000-11-02</publish_date>
+ <description>After an inadvertant trip through a Heisenberg
+ Uncertainty Device, James Salway discovers the problems
+ of being quantum.</description>
+ </book>
+ <book id= " bk110 " >
+ <author>O'Brien, Tim</author>
+ <title>Microsoft .NET: The Programming Bible</title>
+ <genre>Computer</genre>
+ <price>36.95</price>
+ <publish_date>2000-12-09</publish_date>
+ <description>Microsoft's .NET initiative is explored in
+ detail in this deep programmer's reference.</description>
+ </book>
+ <book id= " bk111 " >
+ <author>O'Brien, Tim</author>
+ <title>MSXML3: A Comprehensive Guide</title>
+ <genre>Computer</genre>
+ <price>36.95</price>
+ <publish_date>2000-12-01</publish_date>
+ <description>The Microsoft MSXML3 parser is covered in
+ detail, with attention to XML DOM interfaces, XSLT processing,
+ SAX and more.</description>
+ </book>
+ <book id= " bk112 " >
+ <author>Galos, Mike</author>
+ <title>Visual Studio 7: A Comprehensive Guide</title>
+ <genre>Computer</genre>
+ <price>49.95</price>
+ <publish_date>2001-04-16</publish_date>
+ <description>Microsoft Visual Studio 7 is explored in depth,
+ looking at how Visual Basic, Visual C++, C#, and ASP+ are
+ integrated into a comprehensive development
+ environment.</description>
+ </book>
+</catalog>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jaxp/javax/xml/jaxp/unittest/transform/StAX2DOMTest1.xml Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<catalog>
+ <book id= " bk101 " >
+ <author>Gambardella, Matthew</author>
+ <title>XML Developer's Guide</title>
+ <genre>Computer</genre>
+ <price>44.95</price>
+ <publish_date>2000-10-01</publish_date>
+ <description>An in-depth look at creating applications
+ with XML.</description>
+ </book>
+ <book id= " bk102 " >
+ <author>Ralls, Kim</author>
+ <title>Midnight Rain</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2000-12-16</publish_date>
+ <description>A former architect battles corporate zombies,
+ an evil sorceress, and her own childhood to become queen
+ of the world.</description>
+ </book>
+ <book id= " bk103 " >
+ <author>Corets, Eva</author>
+ <title>Maeve Ascendant</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2000-11-17</publish_date>
+ <description>After the collapse of a nanotechnology
+ society in England, the young survivors lay the
+ foundation for a new society.</description>
+ </book>
+ <book id= " bk104 " >
+ <author>Corets, Eva</author>
+ <title>Oberon's Legacy</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2001-03-10</publish_date>
+ <description>In post-apocalypse England, the mysterious
+ agent known only as Oberon helps to create a new life
+ for the inhabitants of London. Sequel to Maeve
+ Ascendant.</description>
+ </book>
+ <book id= " bk105 " >
+ <author>Corets, Eva</author>
+ <title>The Sundered Grail</title>
+ <genre>Fantasy</genre>
+ <price>5.95</price>
+ <publish_date>2001-09-10</publish_date>
+ <description>The two daughters of Maeve, half-sisters,
+ battle one another for control of England. Sequel to
+ Oberon's Legacy.</description>
+ </book>
+ <book id= " bk106 " >
+ <author>Randall, Cynthia</author>
+ <title>Lover Birds</title>
+ <genre>Romance</genre>
+ <price>4.95</price>
+ <publish_date>2000-09-02</publish_date>
+ <description>When Carla meets Paul at an ornithology
+ conference, tempers fly as feathers get ruffled.</description>
+ </book>
+ <book id= " bk107 " >
+ <author>Thurman, Paula</author>
+ <title>Splish Splash</title>
+ <genre>Romance</genre>
+ <price>4.95</price>
+ <publish_date>2000-11-02</publish_date>
+ <description>A deep sea diver finds true love twenty
+ thousand leagues beneath the sea.</description>
+ </book>
+ <book id= " bk108 " >
+ <author>Knorr, Stefan</author>
+ <title>Creepy Crawlies</title>
+ <genre>Horror</genre>
+ <price>4.95</price>
+ <publish_date>2000-12-06</publish_date>
+ <description>An anthology of horror stories about roaches,
+ centipedes, scorpions and other insects.</description>
+ </book>
+ <book id= " bk109 " >
+ <author>Kress, Peter</author>
+ <title>Paradox Lost</title>
+ <genre>Science Fiction</genre>
+ <price>6.95</price>
+ <publish_date>2000-11-02</publish_date>
+ <description>After an inadvertant trip through a Heisenberg
+ Uncertainty Device, James Salway discovers the problems
+ of being quantum.</description>
+ </book>
+ <book id= " bk110 " >
+ <author>O'Brien, Tim</author>
+ <title>Microsoft .NET: The Programming Bible</title>
+ <genre>Computer</genre>
+ <price>36.95</price>
+ <publish_date>2000-12-09</publish_date>
+ <description>Microsoft's .NET initiative is explored in
+ detail in this deep programmer's reference.</description>
+ </book>
+ <book id= " bk111 " >
+ <author>O'Brien, Tim</author>
+ <title>MSXML3: A Comprehensive Guide</title>
+ <genre>Computer</genre>
+ <price>36.95</price>
+ <publish_date>2000-12-01</publish_date>
+ <description>The Microsoft MSXML3 parser is covered in
+ detail, with attention to XML DOM interfaces, XSLT processing,
+ SAX and more.</description>
+ </book>
+ <book id= " bk112 " >
+ <author>Galos, Mike</author>
+ <title>Visual Studio 7: A Comprehensive Guide</title>
+ <genre>Computer</genre>
+ <price>49.95</price>
+ <publish_date>2001-04-16</publish_date>
+ <description>Microsoft Visual Studio 7 is explored in depth,
+ looking at how Visual Basic, Visual C++, C#, and ASP+ are
+ integrated into a comprehensive development
+ environment.</description>
+ </book>
+</catalog>
--- a/test/jdk/ProblemList.txt Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/ProblemList.txt Fri Oct 11 12:08:01 2019 +0530
@@ -250,7 +250,7 @@
java/awt/font/TextLayout/TextLayoutBounds.java 8169188 generic-all
java/awt/font/StyledMetrics/BoldSpace.java 8198422 linux-all
java/awt/FontMetrics/FontCrash.java 8198336 windows-all
-java/awt/FontMetrics/MaxAdvanceIsMax.java 8221305 solaris-all,macosx-all
+java/awt/FontMetrics/MaxAdvanceIsMax.java 8221305,8231495 solaris-all,macosx-all,linux-all
java/awt/image/DrawImage/IncorrectAlphaSurface2SW.java 8056077 generic-all
java/awt/image/DrawImage/IncorrectClipXorModeSW2Surface.java 8196025 windows-all
java/awt/image/DrawImage/IncorrectClipXorModeSurface2Surface.java 8196025 windows-all
@@ -564,8 +564,6 @@
javax/management/monitor/DerivedGaugeMonitorTest.java 8042211 generic-all
javax/management/remote/mandatory/connection/MultiThreadDeadLockTest.java 8042215 generic-all
-java/lang/management/ThreadMXBean/LockedSynchronizers.java 8231032 generic-all
-
############################################################################
# jdk_io
@@ -863,6 +861,8 @@
sun/tools/jstat/jstatClassloadOutput1.sh 8173942 generic-all
sun/tools/jhsdb/BasicLauncherTest.java 8193639,8211767 solaris-all,linux-ppc64,linux-ppc64le
sun/tools/jhsdb/HeapDumpTest.java 8193639 solaris-all
+sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java 8230731,8001227 windows-all
+sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java 8231635,8231634 generic-all
############################################################################
@@ -884,7 +884,6 @@
jdk/jfr/event/io/EvilInstrument.java 8221331 generic-all
jdk/jfr/event/runtime/TestNetworkUtilizationEvent.java 8228990,8229370 generic-all
jdk/jfr/event/compiler/TestCodeSweeper.java 8225209 generic-all
-jdk/jfr/jcmd/TestJcmdConfigure.java 8231317 windows-x64
############################################################################
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/com/sun/jdi/JdbStopInNotificationThreadTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Tests that the breakpoint in the notification listener is hit when the
+ * notification thread is enabled and is not hit when the notification thread is disabled
+ * (the service thread delivers the notifications in this case).
+ *
+ * @library /test/lib
+ * @run compile -g JdbStopInNotificationThreadTest.java
+ * @run main/othervm JdbStopInNotificationThreadTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import lib.jdb.JdbCommand;
+import lib.jdb.JdbTest;
+
+import javax.management.Notification;
+import javax.management.NotificationEmitter;
+import javax.management.NotificationListener;
+import java.lang.management.*;
+import java.util.Collection;
+import java.util.LinkedList;
+
+class JdbStopInNotificationThreadTestTarg {
+
+ private static volatile boolean done = false;
+
+ private static final MemoryPoolMXBean tenuredGenPool =
+ findTenuredGenPool();
+
+ public static void main(String[] args) throws Exception {
+ test(); // @1 breakpoint
+ }
+
+ private static void test() throws Exception {
+ setPercentageUsageThreshold(0.1);
+ MemoryMXBean mbean = ManagementFactory.getMemoryMXBean();
+ NotificationEmitter emitter = (NotificationEmitter) mbean;
+ emitter.addNotificationListener(new NotificationListener() {
+ public void handleNotification(Notification n, Object hb) {
+ System.out.println("Notification received:" + n.getType());
+ if (n.getType().equals(
+ MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED)) {
+ done = true;
+ System.out.println("Notification MEMORY_THRESHOLD_EXCEEDED received:");
+ long maxMemory = tenuredGenPool.getUsage().getMax();
+ long usedMemory = tenuredGenPool.getUsage().getUsed();
+ System.out.println("Memory usage low!!!"); // @2 breakpoint
+ double percentageUsed = ((double) usedMemory) / maxMemory;
+ System.out.println("percentageUsed = " + percentageUsed);
+ }
+ }
+ }, null, null);
+
+ Collection<Object[]> numbers = new LinkedList<>();
+ long counter = 0;
+ while (!done) {
+ numbers.add(new Object[1000]);
+ counter++;
+ if (counter % 1000 == 0) {
+ Thread.sleep(100);
+ }
+ }
+ System.out.println("Done");
+ }
+
+ private static MemoryPoolMXBean findTenuredGenPool() {
+ for (MemoryPoolMXBean pool :
+ ManagementFactory.getMemoryPoolMXBeans()) {
+ if (pool.getType() == MemoryType.HEAP &&
+ pool.isUsageThresholdSupported()) {
+ return pool;
+ }
+ }
+ throw new RuntimeException("Could not find tenured space");
+ }
+
+ public static void setPercentageUsageThreshold(double percentage) {
+ if (percentage <= 0.0 || percentage > 1.0) {
+ throw new IllegalArgumentException("Percentage not in range");
+ }
+ System.out.println("Setting threashold for pool " + tenuredGenPool.getName() + " percentage:" + percentage);
+ long maxMemory = tenuredGenPool.getUsage().getMax();
+ long warningThreshold = (long) (maxMemory * percentage);
+ tenuredGenPool.setUsageThreshold(warningThreshold);
+ }
+}
+
+public class JdbStopInNotificationThreadTest extends JdbTest {
+
+ private static final String DEBUGGEE_CLASS = JdbStopInNotificationThreadTestTarg.class.getName();
+ private static final String PATTERN1_TEMPLATE = "^Breakpoint hit: \"thread=Notification Thread\", " +
+ "JdbStopInNotificationThreadTestTarg\\$1\\.handleNotification\\(\\), line=%LINE_NUMBER.*\\R%LINE_NUMBER\\s+System\\.out\\.println\\(\"Memory usage low!!!\"\\);.*";
+
+ private JdbStopInNotificationThreadTest() {
+ super(DEBUGGEE_CLASS);
+ }
+
+ public static void main(String argv[]) {
+ new JdbStopInNotificationThreadTest().run();
+ }
+
+ @Override
+ protected void runCases() {
+ if (isNotificationThreadDisabled()) {
+ System.out.println("Notification Thread is disabled. Skipping the test");
+ return;
+ }
+ int bpLine2 = parseBreakpoints(getTestSourcePath("JdbStopInNotificationThreadTest.java"), 2).get(0);
+ jdb.command(JdbCommand.stopAt(DEBUGGEE_CLASS + "$1", bpLine2));
+ String pattern = PATTERN1_TEMPLATE.replaceAll("%LINE_NUMBER", String.valueOf(bpLine2));
+ jdb.command(JdbCommand.cont());
+ new OutputAnalyzer(jdb.getJdbOutput()).shouldMatch(pattern);
+ }
+
+ private boolean isNotificationThreadDisabled() {
+ int bpLine1 = parseBreakpoints(getTestSourcePath("JdbStopInNotificationThreadTest.java"), 1).get(0);
+ jdb.command(JdbCommand.stopAt(DEBUGGEE_CLASS, bpLine1));
+ jdb.command(JdbCommand.run());
+ jdb.command(JdbCommand.threads());
+ if (new OutputAnalyzer(jdb.getJdbOutput()).getOutput().contains("Notification Thread")) {
+ return false;
+ }
+ return true;
+ }
+}
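The debuggee above arms a heap usage threshold and then allocates until a MEMORY_THRESHOLD_EXCEEDED notification arrives. A minimal standalone sketch of that notification mechanism outside the jdb harness follows; the class name, the 10% threshold and the assumption that the pool reports a defined maximum are illustrative.

    import java.lang.management.*;
    import javax.management.*;

    // Editorial sketch, not part of the patch: register for heap usage
    // threshold notifications the same way the debuggee does.
    public class MemoryThresholdSketch {
        public static void main(String[] args) {
            MemoryPoolMXBean pool = ManagementFactory.getMemoryPoolMXBeans().stream()
                    .filter(p -> p.getType() == MemoryType.HEAP && p.isUsageThresholdSupported())
                    .findFirst()
                    .orElseThrow(() -> new RuntimeException("no suitable heap pool"));
            // assumes the pool reports a defined maximum (getMax() != -1)
            pool.setUsageThreshold((long) (pool.getUsage().getMax() * 0.1));

            NotificationEmitter emitter =
                    (NotificationEmitter) ManagementFactory.getMemoryMXBean();
            emitter.addNotificationListener((notification, handback) -> {
                if (MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED.equals(notification.getType())) {
                    System.out.println("Threshold exceeded: " + notification.getMessage());
                }
            }, null, null);
            // registration only; a real driver then allocates until the threshold is crossed
        }
    }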
--- a/test/jdk/com/sun/management/ThreadMXBean/ThreadAllocatedMemory.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/com/sun/management/ThreadMXBean/ThreadAllocatedMemory.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 6173675
+ * @bug 6173675 8231209
* @summary Basic test of ThreadMXBean.getThreadAllocatedBytes
* @author Paul Hohensee
*/
@@ -33,9 +33,8 @@
public class ThreadAllocatedMemory {
private static com.sun.management.ThreadMXBean mbean =
(com.sun.management.ThreadMXBean)ManagementFactory.getThreadMXBean();
- private static boolean testFailed = false;
- private static boolean done = false;
- private static boolean done1 = false;
+ private static volatile boolean done = false;
+ private static volatile boolean done1 = false;
private static Object obj = new Object();
private static final int NUM_THREADS = 10;
private static Thread[] threads = new Thread[NUM_THREADS];
@@ -44,6 +43,22 @@
public static void main(String[] argv)
throws Exception {
+ testSupportEnableDisable();
+
+ // Test current thread two ways
+ testGetCurrentThreadAllocatedBytes();
+ testCurrentThreadGetThreadAllocatedBytes();
+
+ // Test a single thread that is not this one
+ testGetThreadAllocatedBytes();
+
+ // Test many threads that are not this one
+ testGetThreadsAllocatedBytes();
+
+ System.out.println("Test passed");
+ }
+
+ private static void testSupportEnableDisable() {
if (!mbean.isThreadAllocatedMemorySupported()) {
return;
}
@@ -58,10 +73,7 @@
"ThreadAllocatedMemory is expected to be disabled");
}
- Thread curThread = Thread.currentThread();
- long id = curThread.getId();
-
- long s = mbean.getThreadAllocatedBytes(id);
+ long s = mbean.getCurrentThreadAllocatedBytes();
if (s != -1) {
throw new RuntimeException(
"Invalid ThreadAllocatedBytes returned = " +
@@ -77,63 +89,106 @@
throw new RuntimeException(
"ThreadAllocatedMemory is expected to be enabled");
}
+ }
+
+ private static void testGetCurrentThreadAllocatedBytes() {
+ long size = mbean.getCurrentThreadAllocatedBytes();
+ ensureValidSize(size);
+
+ // do some more allocation
+ doit();
+
+ checkResult(Thread.currentThread(), size,
+ mbean.getCurrentThreadAllocatedBytes());
+ }
+
+ private static void testCurrentThreadGetThreadAllocatedBytes() {
+ Thread curThread = Thread.currentThread();
+ long id = curThread.getId();
long size = mbean.getThreadAllocatedBytes(id);
- // implementation could have started measurement when
- // measurement was enabled, in which case size can be 0
- if (size < 0) {
- throw new RuntimeException(
- "Invalid allocated bytes returned = " + size);
- }
+ ensureValidSize(size);
+ // do some more allocation
doit();
- // Expected to be size1 >= size
- long size1 = mbean.getThreadAllocatedBytes(id);
- if (size1 < size) {
- throw new RuntimeException("Allocated bytes " + size1 +
- " expected >= " + size);
+ checkResult(curThread, size, mbean.getThreadAllocatedBytes(id));
+ }
+
+ private static void testGetThreadAllocatedBytes()
+ throws Exception {
+
+ // start a thread
+ done = false; done1 = false;
+ Thread curThread = new MyThread("MyThread");
+ curThread.start();
+ long id = curThread.getId();
+
+ // wait for thread to block after doing some allocation
+ waitUntilThreadBlocked(curThread);
+
+ long size = mbean.getThreadAllocatedBytes(id);
+ ensureValidSize(size);
+
+ // let thread go to do some more allocation
+ synchronized (obj) {
+ done = true;
+ obj.notifyAll();
}
- System.out.println(curThread.getName() +
- " Current thread allocated bytes = " + size +
- " allocated bytes = " + size1);
+
+ // wait for thread to get going again. we don't care if we
+ // catch it in mid-execution or if it hasn't
+ // restarted after we're done sleeping.
+ goSleep(400);
+
+ checkResult(curThread, size, mbean.getThreadAllocatedBytes(id));
+
+ // let thread exit
+ synchronized (obj) {
+ done1 = true;
+ obj.notifyAll();
+ }
+ try {
+ curThread.join();
+ } catch (InterruptedException e) {
+ System.out.println("Unexpected exception is thrown.");
+ e.printStackTrace(System.out);
+ }
+ }
- // start threads, wait for them to block
+ private static void testGetThreadsAllocatedBytes()
+ throws Exception {
+
+ // start threads
+ done = false; done1 = false;
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new MyThread("MyThread-" + i);
threads[i].start();
}
- // threads block after doing some allocation
- waitUntilThreadBlocked();
+ // wait for threads to block after doing some allocation
+ waitUntilThreadsBlocked();
for (int i = 0; i < NUM_THREADS; i++) {
sizes[i] = mbean.getThreadAllocatedBytes(threads[i].getId());
+ ensureValidSize(sizes[i]);
}
- // let threads go and do some more allocation
+ // let threads go to do some more allocation
synchronized (obj) {
done = true;
obj.notifyAll();
}
- // wait for threads to get going again. we don't care if we
+ // wait for threads to get going again. we don't care if we
// catch them in mid-execution or if some of them haven't
// restarted after we're done sleeping.
goSleep(400);
for (int i = 0; i < NUM_THREADS; i++) {
- long newSize = mbean.getThreadAllocatedBytes(threads[i].getId());
- if (sizes[i] > newSize) {
- throw new RuntimeException("TEST FAILED: " +
- threads[i].getName() +
- " previous allocated bytes = " + sizes[i] +
- " > current allocated bytes = " + newSize);
- }
- System.out.println(threads[i].getName() +
- " Previous allocated bytes = " + sizes[i] +
- " Current allocated bytes = " + newSize);
+ checkResult(threads[i], sizes[i],
+ mbean.getThreadAllocatedBytes(threads[i].getId()));
}
// let threads exit
@@ -148,17 +203,30 @@
} catch (InterruptedException e) {
System.out.println("Unexpected exception is thrown.");
e.printStackTrace(System.out);
- testFailed = true;
break;
}
}
- if (testFailed) {
- throw new RuntimeException("TEST FAILED");
+ }
+
+ private static void ensureValidSize(long size) {
+ // implementation could have started measurement when
+ // measurement was enabled, in which case size can be 0
+ if (size < 0) {
+ throw new RuntimeException(
+ "Invalid allocated bytes returned = " + size);
}
-
- System.out.println("Test passed");
}
+ private static void checkResult(Thread curThread,
+ long prev_size, long curr_size) {
+ if (curr_size < prev_size) {
+ throw new RuntimeException("Allocated bytes " + curr_size +
+ " expected >= " + prev_size);
+ }
+ System.out.println(curThread.getName() +
+ " Previous allocated bytes = " + prev_size +
+ " Current allocated bytes = " + curr_size);
+ }
private static void goSleep(long ms) throws Exception {
try {
@@ -169,7 +237,18 @@
}
}
- private static void waitUntilThreadBlocked()
+ private static void waitUntilThreadBlocked(Thread thread)
+ throws Exception {
+ while (true) {
+ goSleep(100);
+ ThreadInfo info = mbean.getThreadInfo(thread.getId());
+ if (info.getThreadState() == Thread.State.WAITING) {
+ break;
+ }
+ }
+ }
+
+ private static void waitUntilThreadsBlocked()
throws Exception {
int count = 0;
while (count != NUM_THREADS) {
@@ -210,7 +289,6 @@
} catch (InterruptedException e) {
System.out.println("Unexpected exception is thrown.");
e.printStackTrace(System.out);
- testFailed = true;
break;
}
}
@@ -225,7 +303,7 @@
" ThreadAllocatedBytes = " + size2);
if (size1 > size2) {
- throw new RuntimeException("TEST FAILED: " + getName() +
+ throw new RuntimeException(getName() +
" ThreadAllocatedBytes = " + size1 +
" > ThreadAllocatedBytes = " + size2);
}
@@ -237,7 +315,6 @@
} catch (InterruptedException e) {
System.out.println("Unexpected exception is thrown.");
e.printStackTrace(System.out);
- testFailed = true;
break;
}
}
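The rewrite above splits the test into one method per scenario and exercises the getCurrentThreadAllocatedBytes() call alongside getThreadAllocatedBytes(id). A minimal standalone sketch of those counters follows; the class name and the 1 MB allocation are illustrative.

    import java.lang.management.ManagementFactory;

    // Editorial sketch, not part of the patch: read the per-thread allocation
    // counters. The cast is needed because they are JDK-specific extensions of
    // the standard ThreadMXBean.
    public class AllocatedBytesSketch {
        public static void main(String[] args) {
            com.sun.management.ThreadMXBean bean =
                    (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();
            long before = bean.getCurrentThreadAllocatedBytes();
            byte[] data = new byte[1 << 20];   // allocate roughly 1 MB
            long after = bean.getCurrentThreadAllocatedBytes();
            System.out.println("allocated >= " + (after - before)
                    + " bytes (array length " + data.length + ")");
        }
    }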
--- a/test/jdk/java/io/File/SetLastModified.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/io/File/SetLastModified.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
/* @test
@bug 4091757 6652379 8177809
+ @requires os.maxMemory >= 16G
@summary Basic test for setLastModified method
*/
--- a/test/jdk/java/net/CookieHandler/LocalHostCookie.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/CookieHandler/LocalHostCookie.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
import java.net.*;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.Executors;
+import static java.net.Proxy.NO_PROXY;
/*
* @test
@@ -52,7 +52,7 @@
s = new Server();
s.startServer();
URL url = new URL("http","localhost", s.getPort(), "/");
- HttpURLConnection urlConnection = (HttpURLConnection)url.openConnection();
+ HttpURLConnection urlConnection = (HttpURLConnection)url.openConnection(NO_PROXY);
urlConnection.setRequestMethod("GET");
urlConnection.setDoOutput(true);
urlConnection.connect();
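This test, like several below, now opens connections with NO_PROXY so that a proxy configured on the machine running the test cannot intercept loopback traffic. A minimal sketch of the idiom follows; the URL points at a hypothetical local server.

    import java.net.HttpURLConnection;
    import java.net.Proxy;
    import java.net.URL;

    // Editorial sketch, not part of the patch: bypass any configured proxy
    // explicitly when connecting to a local test server.
    public class NoProxySketch {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://localhost:8080/");   // hypothetical local server
            HttpURLConnection conn =
                    (HttpURLConnection) url.openConnection(Proxy.NO_PROXY);
            System.out.println("response code: " + conn.getResponseCode());
        }
    }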
--- a/test/jdk/java/net/DatagramSocket/DatagramTimeout.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/DatagramSocket/DatagramTimeout.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,32 +23,56 @@
/**
* @test
- * @bug 4163126
- * @summary test to see if timeout hangs
- * @run main/timeout=15 DatagramTimeout
+ * @bug 4163126 8222829
+ * @summary Test to see if timeout hangs. Also checks that
+ * a negative timeout value fails as expected.
+ * @run testng DatagramTimeout
*/
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.SocketTimeoutException;
+import java.nio.channels.DatagramChannel;
+
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.expectThrows;
+
public class DatagramTimeout {
- public static void main(String[] args) throws Exception {
- boolean success = false;
- DatagramSocket sock = new DatagramSocket();
+ private static final Class<IllegalArgumentException> IAE = IllegalArgumentException.class;
+ private static final Class<SocketTimeoutException> STE = SocketTimeoutException.class;
- try {
- DatagramPacket p;
+ /**
+ * Test DatagramSocket setSoTimeout with a valid timeout value.
+ */
+ @Test
+ public void testSetTimeout() throws Exception {
+ try (DatagramSocket s = new DatagramSocket()) {
byte[] buffer = new byte[50];
- p = new DatagramPacket(buffer, buffer.length);
- sock.setSoTimeout(2);
- sock.receive(p);
- } catch (SocketTimeoutException e) {
- success = true;
- } finally {
- sock.close();
+ DatagramPacket p = new DatagramPacket(buffer, buffer.length);
+ s.setSoTimeout(2);
+ expectThrows(STE, () -> s.receive(p));
}
- if (!success)
- throw new RuntimeException("Socket timeout failure.");
}
+ /**
+ * Test DatagramSocket setSoTimeout with a negative timeout.
+ */
+ @Test
+ public void testSetNegativeTimeout() throws Exception {
+ try (DatagramSocket s = new DatagramSocket()) {
+ expectThrows(IAE, () -> s.setSoTimeout(-1));
+ }
+ }
+
+ /**
+ * Test DatagramSocketAdaptor setSoTimeout with a negative timeout.
+ */
+ @Test
+ public void testNegativeTimeout() throws Exception {
+ try (DatagramChannel dc = DatagramChannel.open()) {
+ var s = dc.socket();
+ expectThrows(IAE, () -> s.setSoTimeout(-1));
+ }
+ }
}
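The test above moves from a main-based timeout check to TestNG and asserts the failure modes with Assert.expectThrows. A minimal sketch of that idiom follows; the class and method names are illustrative.

    import java.net.DatagramSocket;
    import org.testng.annotations.Test;
    import static org.testng.Assert.expectThrows;

    // Editorial sketch, not part of the patch: expectThrows returns the thrown
    // exception so it can be inspected after the assertion.
    public class ExpectThrowsSketch {
        @Test
        public void negativeTimeoutRejected() throws Exception {
            try (DatagramSocket s = new DatagramSocket()) {
                IllegalArgumentException iae =
                        expectThrows(IllegalArgumentException.class, () -> s.setSoTimeout(-1));
                System.out.println("got expected exception: " + iae);
            }
        }
    }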
--- a/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -40,6 +40,7 @@
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import jdk.test.lib.net.SimpleSSLContext;
+import static java.net.Proxy.NO_PROXY;
/*
* @test
@@ -296,7 +297,7 @@
HttpURLConnection conn = (HttpURLConnection)
(authType == HttpAuthType.PROXY
? url.openConnection(proxy)
- : url.openConnection());
+ : url.openConnection(NO_PROXY));
return conn;
}
}
--- a/test/jdk/java/net/InetAddress/ptr/Lookup.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/InetAddress/ptr/Lookup.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,8 @@
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
+import java.util.stream.Stream;
+import java.util.stream.Collectors;
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.process.OutputAnalyzer;
@@ -55,40 +57,80 @@
public static void main(String args[]) throws IOException {
String addr = null;
String ipv4Name = null;
+ String ipv4Reversed = null;
+
if (args.length == 0) {
- // First check that host resolves to IPv4 address
+ // called from lookupWithIPv4Prefer
+ // obtain an IPv4 address from the hostname.
try {
InetAddress ia = InetAddress.getByName(HOST);
addr = ia.getHostAddress();
+ ia = InetAddress.getByName(addr);
+ System.out.print(addr + ":" + ia.getHostName());
+ return;
+ } catch (UnknownHostException e) {
+ System.out.print(SKIP);
+ return;
+ }
+ } else if (args.length == 2 && args[0].equals("reverse")) {
+ // called from reverseWithIPv4Prefer
+ // Check that the IPv4 address can be resolved to a host
+ // with -Djava.net.preferIPv4Stack=true
+ try {
+ InetAddress ia = InetAddress.getByName(args[1]);
+ addr = ia.getHostAddress();
+ ipv4Reversed = ia.getHostName();
+ System.out.print(addr + ":" + ipv4Reversed);
+ return;
} catch (UnknownHostException e) {
System.out.print(SKIP);
return;
}
- } else {
- String tmp = lookupWithIPv4Prefer();
- System.out.println("IPv4 lookup results: [" + tmp + "]");
- if (SKIP.equals(tmp)) {
- System.out.println(HOST + " can't be resolved - test skipped.");
- return;
- }
+ } else if (args.length != 1 || !args[0].equals("root")) {
+ throw new IllegalArgumentException(Stream.of(args).collect(Collectors.joining(" ")));
+ }
- String[] strs = tmp.split(":");
- addr = strs[0];
- ipv4Name = strs[1];
+ // spawn a subprocess to obtain the IPv4 address
+ String tmp = lookupWithIPv4Prefer();
+ System.out.println("IPv4 lookup results: [" + tmp + "]");
+ if (SKIP.equals(tmp)) {
+ System.out.println(HOST + " can't be resolved - test skipped.");
+ return;
}
- // reverse lookup
+ String[] strs = tmp.split(":");
+ addr = strs[0];
+ ipv4Name = strs[1];
+
+ // check that a reverse lookup of the IPv4 address
+ // will succeed with the IPv4-only stack
+ tmp = reverseWithIPv4Prefer(addr);
+ System.out.println("IPv4 reverse lookup results: [" + tmp + "]");
+ if (SKIP.equals(tmp)) {
+ System.out.println(addr + " can't be resolved with preferIPv4 - test skipped.");
+ return;
+ }
+
+ strs = tmp.split(":");
+ ipv4Reversed = strs[1];
+
+ // Now check that a reverse lookup will succeed with the dual stack.
InetAddress ia = InetAddress.getByName(addr);
String name = ia.getHostName();
- if (args.length == 0) {
- System.out.print(addr + ":" + name);
- return;
- } else {
- System.out.println("(default) " + addr + "--> " + name);
- if (!ipv4Name.equals(name)) {
- throw new RuntimeException("Mismatch between default"
- + " and java.net.preferIPv4Stack=true results");
+
+ System.out.println("(default) " + addr + "--> " + name
+ + " (reversed IPv4: " + ipv4Reversed + ")");
+ if (!ipv4Name.equals(name)) {
+ // add some diagnostics
+ System.err.println("name=" + name + " doesn't match expected=" + ipv4Name);
+ System.err.println("Listing all addresses:");
+ for (InetAddress any : InetAddress.getAllByName(HOST)) {
+ System.err.println("\t[" + any + "] address=" + any.getHostAddress()
+ + ", host=" + any.getHostName());
}
+ // make the test fail...
+ throw new RuntimeException("Mismatch between default"
+ + " and java.net.preferIPv4Stack=true results");
}
}
@@ -100,5 +142,13 @@
System.out.println("Executing: " + cmd);
return new OutputAnalyzer(new ProcessBuilder(cmd).start()).getOutput();
}
+
+ static String reverseWithIPv4Prefer(String addr) throws IOException {
+ String java = JDKToolFinder.getTestJDKTool("java");
+ String testClz = Lookup.class.getName();
+ List<String> cmd = List.of(java, "-Djava.net.preferIPv4Stack=true",
+ "-cp", CLASS_PATH, testClz, "reverse", addr);
+ System.out.println("Executing: " + cmd);
+ return new OutputAnalyzer(new ProcessBuilder(cmd).start()).getOutput();
+ }
}
-
--- a/test/jdk/java/net/MulticastSocket/UnreferencedMulticastSockets.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/MulticastSocket/UnreferencedMulticastSockets.java Fri Oct 11 12:08:01 2019 +0530
@@ -50,6 +50,7 @@
import java.util.ArrayDeque;
import java.util.List;
import java.util.Optional;
+import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import jdk.test.lib.net.IPSupport;
@@ -72,11 +73,14 @@
static class Server implements Runnable {
MulticastSocket ss;
-
+ final int port;
+ final Phaser phaser = new Phaser(2);
Server() throws IOException {
+ InetAddress loopback = InetAddress.getLoopbackAddress();
InetSocketAddress serverAddress =
- new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+ new InetSocketAddress(loopback, 0);
ss = new MulticastSocket(serverAddress);
+ port = ss.getLocalPort();
System.out.printf(" DatagramServer addr: %s: %d%n",
this.getHost(), this.getPort());
pendingSockets.add(new NamedWeak(ss, pendingQueue, "serverMulticastSocket"));
@@ -89,7 +93,7 @@
}
int getPort() {
- return ss.getLocalPort();
+ return port;
}
// Receive a byte and send back a byte
@@ -98,12 +102,18 @@
byte[] buffer = new byte[50];
DatagramPacket p = new DatagramPacket(buffer, buffer.length);
ss.receive(p);
+ System.out.printf("Server: ping received from: %s%n", p.getSocketAddress());
+ phaser.arriveAndAwaitAdvance(); // await the client...
buffer[0] += 1;
+ System.out.printf("Server: sending echo to: %s%n", p.getSocketAddress());
ss.send(p); // send back +1
+ System.out.printf("Server: awaiting client%n");
+ phaser.arriveAndAwaitAdvance(); // await the client...
// do NOT close but 'forget' the socket reference
+ System.out.printf("Server: forgetting socket...%n");
ss = null;
- } catch (Exception ioe) {
+ } catch (Throwable ioe) {
ioe.printStackTrace();
}
}
@@ -112,8 +122,11 @@
public static void main(String args[]) throws Exception {
IPSupport.throwSkippedExceptionIfNonOperational();
+ InetSocketAddress clientAddress =
+ new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+
// Create and close a MulticastSocket to warm up the FD count for side effects.
- try (MulticastSocket s = new MulticastSocket(0)) {
+ try (MulticastSocket s = new MulticastSocket(clientAddress)) {
// no-op; close immediately
s.getLocalPort(); // no-op
}
@@ -126,8 +139,33 @@
Thread thr = new Thread(svr);
thr.start();
- MulticastSocket client = new MulticastSocket(0);
- System.out.printf(" client bound port: %d%n", client.getLocalPort());
+ // It is possible under some circumstances that the client
+ // might get bound to the same port as the server: this
+ // would make the test fail - so if this happens we try to
+ // bind to a specific port by incrementing the server port.
+ MulticastSocket client = null;
+ int serverPort = svr.getPort();
+ int maxtries = 20;
+ for (int i = 0; i < maxtries; i++) {
+ try {
+ System.out.printf("Trying to bind client to: %s%n", clientAddress);
+ client = new MulticastSocket(clientAddress);
+ if (client.getLocalPort() != svr.getPort()) break;
+ client.close();
+ } catch (IOException x) {
+ System.out.printf("Couldn't create client after %d attempts: %s%n", i, x);
+ if (i == maxtries) throw x;
+ }
+ if (i == maxtries) {
+ String msg = String.format("Couldn't create client after %d attempts", i);
+ System.out.println(msg);
+ throw new AssertionError(msg);
+ }
+ clientAddress = new InetSocketAddress(clientAddress.getAddress(), serverPort + i);
+ }
+
+ System.out.printf(" client bound port: %s:%d%n",
+ client.getLocalAddress(), client.getLocalPort());
client.connect(svr.getHost(), svr.getPort());
pendingSockets.add(new NamedWeak(client, pendingQueue, "clientMulticastSocket"));
extractRefs(client, "clientMulticastSocket");
@@ -136,14 +174,17 @@
msg[0] = 1;
DatagramPacket p = new DatagramPacket(msg, msg.length, svr.getHost(), svr.getPort());
client.send(p);
+ System.out.printf(" ping sent to: %s:%d%n", svr.getHost(), svr.getPort());
+ svr.phaser.arriveAndAwaitAdvance(); // wait until the server has received its packet
p = new DatagramPacket(msg, msg.length);
client.receive(p);
- System.out.printf("echo received from: %s%n", p.getSocketAddress());
+ System.out.printf(" echo received from: %s%n", p.getSocketAddress());
if (msg[0] != 2) {
throw new AssertionError("incorrect data received: expected: 2, actual: " + msg[0]);
}
+ svr.phaser.arriveAndAwaitAdvance(); // let the server null out its socket
// Do NOT close the MulticastSocket; forget it
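The test now synchronizes client and server with a two-party Phaser instead of relying on timing alone. A minimal standalone sketch of that handshake follows; the class name is illustrative.

    import java.util.concurrent.Phaser;

    // Editorial sketch, not part of the patch: a two-party Phaser makes each
    // side wait at arriveAndAwaitAdvance() until the other side arrives too.
    public class PhaserHandshakeSketch {
        public static void main(String[] args) throws Exception {
            Phaser phaser = new Phaser(2);          // two registered parties
            Thread server = new Thread(() -> {
                System.out.println("server: ready");
                phaser.arriveAndAwaitAdvance();     // wait for the client
                System.out.println("server: continuing");
            });
            server.start();
            System.out.println("client: ready");
            phaser.arriveAndAwaitAdvance();         // wait for the server
            System.out.println("client: continuing");
            server.join();
        }
    }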
--- a/test/jdk/java/net/ProxySelector/NullSelector.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/ProxySelector/NullSelector.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
* @bug 6215885
* @library /test/lib
* @summary URLConnection.openConnection NPE if ProxySelector.setDefault is set to null
+ * @run main/othervm NullSelector
*/
import java.net.*;
--- a/test/jdk/java/net/ResponseCache/B6181108.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/ResponseCache/B6181108.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
import java.util.*;
import java.io.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class B6181108 implements Runnable {
ServerSocket ss;
@@ -105,7 +106,7 @@
.toString();
urlWithSpace = base + "/space%20test/page1.html";
URL url = new URL(urlWithSpace);
- URLConnection urlc = url.openConnection();
+ URLConnection urlc = url.openConnection(NO_PROXY);
int i = ((HttpURLConnection)(urlc)).getResponseCode();
System.out.println("response code = " + i);
ResponseCache.setDefault(null);
--- a/test/jdk/java/net/ResponseCache/ResponseCacheTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/ResponseCache/ResponseCacheTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
import java.io.*;
import javax.net.ssl.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
/**
* Request should get serviced by the cache handler. Response get
@@ -137,7 +138,7 @@
.port(ss.getLocalPort())
.path("/file2.1")
.toURL();
- http = (HttpURLConnection)url2.openConnection();
+ http = (HttpURLConnection)url2.openConnection(NO_PROXY);
System.out.println("responsecode2 is :"+http.getResponseCode());
Map<String,List<String>> headers2 = http.getHeaderFields();
--- a/test/jdk/java/net/ResponseCache/getResponseCode.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/ResponseCache/getResponseCode.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
import java.net.*;
import java.util.*;
import java.io.*;
+import static java.net.Proxy.NO_PROXY;
/**
@@ -43,7 +44,7 @@
getResponseCode() throws Exception {
url = new URL("http://localhost/file1.cache");
- HttpURLConnection http = (HttpURLConnection)url.openConnection();
+ HttpURLConnection http = (HttpURLConnection)url.openConnection(NO_PROXY);
int respCode = http.getResponseCode();
http.disconnect();
--- a/test/jdk/java/net/SocketImpl/SocketImplCombinations.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/SocketImpl/SocketImplCombinations.java Fri Oct 11 12:08:01 2019 +0530
@@ -68,7 +68,7 @@
* Test creating a connected Socket, it should be created with a platform SocketImpl.
*/
public void testNewSocket2() throws IOException {
- try (ServerSocket ss = new ServerSocket(0)) {
+ try (ServerSocket ss = boundServerSocket()) {
try (Socket s = new Socket(ss.getInetAddress(), ss.getLocalPort())) {
SocketImpl si = getSocketImpl(s);
assertTrue(isSocksSocketImpl(si));
@@ -127,7 +127,7 @@
Socket s = new Socket((SocketImpl) null) { };
try (s) {
assertTrue(getSocketImpl(s) == null);
- s.bind(new InetSocketAddress(0)); // force SocketImpl to be created
+ s.bind(loopbackSocketAddress()); // force SocketImpl to be created
SocketImpl si = getSocketImpl(s);
assertTrue(isSocksSocketImpl(si));
SocketImpl delegate = getDelegate(si);
@@ -218,7 +218,7 @@
Socket s = new Socket((SocketImpl) null) { };
try (s) {
assertTrue(getSocketImpl(s) == null);
- s.bind(new InetSocketAddress(0)); // force SocketImpl to be created
+ s.bind(loopbackSocketAddress()); // force SocketImpl to be created
assertTrue(getSocketImpl(s) instanceof CustomSocketImpl);
}
} finally {
@@ -378,7 +378,7 @@
public void testServerSocketAccept5a() throws IOException {
SocketImpl serverImpl = new CustomSocketImpl(true);
try (ServerSocket ss = new ServerSocket(serverImpl) { }) {
- ss.bind(new InetSocketAddress(0));
+ ss.bind(loopbackSocketAddress());
expectThrows(IOException.class, ss::accept);
}
}
@@ -566,16 +566,36 @@
}
/**
+ * Returns a new InetSocketAddress with the loopback interface
+ * and port 0.
+ */
+ static InetSocketAddress loopbackSocketAddress() {
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ return new InetSocketAddress(loopback, 0);
+ }
+
+ /**
+ * Returns a ServerSocket bound to a port on the loopback address
+ */
+ static ServerSocket boundServerSocket() throws IOException {
+ ServerSocket ss = new ServerSocket();
+ ss.bind(loopbackSocketAddress());
+ return ss;
+ }
+
+ /**
* Creates a ServerSocket that returns the given Socket from accept.
*/
static ServerSocket serverSocketToAccept(Socket s) throws IOException {
- return new ServerSocket(0) {
+ ServerSocket ss = new ServerSocket() {
@Override
public Socket accept() throws IOException {
implAccept(s);
return s;
}
};
+ ss.bind(loopbackSocketAddress());
+ return ss;
}
/**
@@ -590,7 +610,7 @@
return s;
}
};
- ss.bind(new InetSocketAddress(0));
+ ss.bind(loopbackSocketAddress());
return ss;
}
--- a/test/jdk/java/net/URLConnection/B5052093.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/B5052093.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
import java.net.*;
import java.io.*;
import sun.net.www.protocol.file.FileURLConnection;
+import static java.net.Proxy.NO_PROXY;
public class B5052093 implements HttpCallback {
private static TestHttpServer server;
@@ -68,7 +69,7 @@
server = new TestHttpServer(new B5052093(), 1, 10, loopback, 0);
try {
URL url = new URL("http://" + server.getAuthority() + "/foo");
- URLConnection conn = url.openConnection();
+ URLConnection conn = url.openConnection(NO_PROXY);
int i = conn.getContentLength();
long l = conn.getContentLengthLong();
if (i != -1 || l != testSize) {
--- a/test/jdk/java/net/URLConnection/DisconnectAfterEOF.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/DisconnectAfterEOF.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,8 +33,7 @@
import java.io.*;
import java.util.*;
import jdk.test.lib.net.URIBuilder;
-
-
+import static java.net.Proxy.NO_PROXY;
public class DisconnectAfterEOF {
@@ -217,7 +216,7 @@
}
static URLConnection doRequest(String uri) throws IOException {
- URLConnection uc = (new URL(uri)).openConnection();
+ URLConnection uc = (new URL(uri)).openConnection(NO_PROXY);
uc.setDoOutput(true);
OutputStream out = uc.getOutputStream();
out.write(new byte[16000]);
--- a/test/jdk/java/net/URLConnection/HttpContinueStackOverflow.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/HttpContinueStackOverflow.java Fri Oct 11 12:08:01 2019 +0530
@@ -38,8 +38,8 @@
import java.net.Socket;
import java.net.URL;
import java.net.HttpURLConnection;
-
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class HttpContinueStackOverflow {
@@ -93,7 +93,7 @@
.path("/anything.html")
.toURL();
- HttpURLConnection conn = (HttpURLConnection)url.openConnection();
+ HttpURLConnection conn = (HttpURLConnection)url.openConnection(NO_PROXY);
conn.getResponseCode();
System.out.println("TEST PASSED");
}
--- a/test/jdk/java/net/URLConnection/Redirect307Test.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/Redirect307Test.java Fri Oct 11 12:08:01 2019 +0530
@@ -29,8 +29,8 @@
*/
import java.io.*;
import java.net.*;
-
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
class RedirServer extends Thread {
@@ -113,7 +113,7 @@
.loopback()
.port(port)
.toURL();
- URLConnection conURL = url.openConnection();
+ URLConnection conURL = url.openConnection(NO_PROXY);
conURL.setDoInput(true);
conURL.setAllowUserInteraction(false);
conURL.setUseCaches(false);
--- a/test/jdk/java/net/URLConnection/Responses.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/Responses.java Fri Oct 11 12:08:01 2019 +0530
@@ -29,6 +29,7 @@
*/
import java.net.*;
import java.io.*;
+import static java.net.Proxy.NO_PROXY;
public class Responses {
@@ -149,7 +150,7 @@
System.out.println("Test with response: >" + tests[i][0] + "<");
URL url = new URL("http://" + authority + "/" + i);
- HttpURLConnection http = (HttpURLConnection)url.openConnection();
+ HttpURLConnection http = (HttpURLConnection)url.openConnection(NO_PROXY);
try {
--- a/test/jdk/java/net/URLConnection/URLConnectionHeaders.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/URLConnectionHeaders.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,6 +35,7 @@
import java.util.*;
import java.io.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class URLConnectionHeaders {
@@ -94,7 +95,7 @@
.port(port)
.path("/index.html")
.toURL();
- URLConnection uc = url.openConnection();
+ URLConnection uc = url.openConnection(NO_PROXY);
// add request properties
uc.addRequestProperty("Cookie", "cookie1");
--- a/test/jdk/java/net/URLConnection/contentHandler/UserContentHandler.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLConnection/contentHandler/UserContentHandler.java Fri Oct 11 12:08:01 2019 +0530
@@ -40,6 +40,7 @@
import java.io.*;
import java.util.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class UserContentHandler implements Runnable {
@@ -98,7 +99,7 @@
.path("/anything.txt")
.toURL();
- if (!(u.openConnection().getContent() instanceof String)) {
+ if (!(u.openConnection(NO_PROXY).getContent() instanceof String)) {
throw new RuntimeException("Load user defined content handler failed.");
} else {
System.err.println("Load user defined content handler succeed!");
--- a/test/jdk/java/net/URLPermission/OpenURL.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/URLPermission/OpenURL.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
import java.net.*;
import java.io.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class OpenURL {
@@ -46,7 +47,7 @@
.path("/a/b")
.toURL();
System.out.println("URL: " + url);
- HttpURLConnection urlc = (HttpURLConnection)url.openConnection();
+ HttpURLConnection urlc = (HttpURLConnection)url.openConnection(NO_PROXY);
InputStream is = urlc.getInputStream();
// error will throw exception other than SecurityException
} catch (SecurityException e) {
--- a/test/jdk/java/net/httpclient/DigestEchoServer.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/httpclient/DigestEchoServer.java Fri Oct 11 12:08:01 2019 +0530
@@ -26,6 +26,8 @@
import com.sun.net.httpserver.HttpsConfigurator;
import com.sun.net.httpserver.HttpsParameters;
import com.sun.net.httpserver.HttpsServer;
+
+import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -1568,8 +1570,8 @@
@Override
public void run() {
try {
+ int c = 0;
try {
- int c;
while ((c = is.read()) != -1) {
os.write(c);
os.flush();
@@ -1578,11 +1580,13 @@
if (DEBUG) System.out.print(tag);
}
is.close();
+ } catch (IOException ex) {
+ if (DEBUG || !stopped && c > -1)
+ ex.printStackTrace(System.out);
+ end.completeExceptionally(ex);
} finally {
- os.close();
+ try {os.close();} catch (Throwable t) {}
}
- } catch (IOException ex) {
- if (DEBUG) ex.printStackTrace(System.out);
} finally {
end.complete(null);
}
@@ -1632,10 +1636,12 @@
@Override
public void run() {
Socket clientConnection = null;
+ Socket targetConnection = null;
try {
while (!stopped) {
System.out.println(now() + "Tunnel: Waiting for client");
Socket toClose;
+ targetConnection = clientConnection = null;
try {
toClose = clientConnection = ss.accept();
if (NO_LINGER) {
@@ -1649,7 +1655,6 @@
}
System.out.println(now() + "Tunnel: Client accepted");
StringBuilder headers = new StringBuilder();
- Socket targetConnection = null;
InputStream ccis = clientConnection.getInputStream();
OutputStream ccos = clientConnection.getOutputStream();
Writer w = new OutputStreamWriter(
@@ -1769,28 +1774,44 @@
end1 = new CompletableFuture<>());
Thread t2 = pipe(targetConnection.getInputStream(), ccos, '-',
end2 = new CompletableFuture<>());
- end = CompletableFuture.allOf(end1, end2);
+ var end11 = end1.whenComplete((r, t) -> exceptionally(end2, t));
+ var end22 = end2.whenComplete((r, t) -> exceptionally(end1, t));
+ end = CompletableFuture.allOf(end11, end22);
+ Socket tc = targetConnection;
end.whenComplete(
(r,t) -> {
try { toClose.close(); } catch (IOException x) { }
+ try { tc.close(); } catch (IOException x) { }
finally {connectionCFs.remove(end);}
});
connectionCFs.add(end);
+ targetConnection = clientConnection = null;
t1.start();
t2.start();
}
} catch (Throwable ex) {
- try {
- ss.close();
- } catch (IOException ex1) {
- ex.addSuppressed(ex1);
- }
+ close(clientConnection, ex);
+ close(targetConnection, ex);
+ close(ss, ex);
ex.printStackTrace(System.err);
} finally {
System.out.println(now() + "Tunnel: exiting (stopped=" + stopped + ")");
connectionCFs.forEach(cf -> cf.complete(null));
}
}
+
+ void exceptionally(CompletableFuture<?> cf, Throwable t) {
+ if (t != null) cf.completeExceptionally(t);
+ }
+
+ void close(Closeable c, Throwable e) {
+ if (c == null) return;
+ try {
+ c.close();
+ } catch (IOException x) {
+ e.addSuppressed(x);
+ }
+ }
}
/**
--- a/test/jdk/java/net/httpclient/ManyRequestsLegacy.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/httpclient/ManyRequestsLegacy.java Fri Oct 11 12:08:01 2019 +0530
@@ -74,6 +74,7 @@
import java.util.logging.Logger;
import java.util.logging.Level;
import jdk.test.lib.net.SimpleSSLContext;
+import static java.net.Proxy.NO_PROXY;
public class ManyRequestsLegacy {
@@ -159,7 +160,7 @@
long start = System.nanoTime();
try {
CompletableFuture<LegacyHttpResponse> cf = new CompletableFuture<>();
- URLConnection urlc = r.uri().toURL().openConnection();
+ URLConnection urlc = r.uri().toURL().openConnection(NO_PROXY);
HttpURLConnection httpc = (HttpURLConnection)urlc;
httpc.setRequestMethod(r.method());
for (String s : r.headers().map().keySet()) {
--- a/test/jdk/java/net/httpclient/PlainProxyConnectionTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/httpclient/PlainProxyConnectionTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -44,6 +44,7 @@
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.stream.Collectors;
+import static java.net.Proxy.NO_PROXY;
/**
* @test
@@ -139,7 +140,7 @@
throws IOException {
connections.clear();
System.out.println("Verifying communication with server");
- try (InputStream is = uri.toURL().openConnection().getInputStream()) {
+ try (InputStream is = uri.toURL().openConnection(NO_PROXY).getInputStream()) {
String resp = new String(is.readAllBytes(), StandardCharsets.UTF_8);
System.out.println(resp);
if (!RESPONSE.equals(resp)) {
--- a/test/jdk/java/net/httpclient/ProxyTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/net/httpclient/ProxyTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,7 @@
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import jdk.test.lib.net.SimpleSSLContext;
+import static java.net.Proxy.NO_PROXY;
/**
* @test
@@ -167,7 +168,7 @@
System.out.println("Verifying communication with server");
URI uri = new URI("https://localhost:"
+ server.getAddress().getPort() + PATH + "x");
- try (InputStream is = uri.toURL().openConnection().getInputStream()) {
+ try (InputStream is = uri.toURL().openConnection(NO_PROXY).getInputStream()) {
String resp = new String(is.readAllBytes(), StandardCharsets.UTF_8);
System.out.println(resp);
if (!RESPONSE.equals(resp)) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/nio/channels/DatagramChannel/AddressesAfterDisconnect.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @library /test/lib
+ * @summary Test DatagramChannel local address after disconnect.
+ * @requires (os.family != "mac")
+ * @run testng/othervm AddressesAfterDisconnect
+ * @run testng/othervm -Djava.net.preferIPv6Addresses=true AddressesAfterDisconnect
+ * @run testng/othervm -Djava.net.preferIPv4Stack=true AddressesAfterDisconnect
+ */
+
+import jdk.test.lib.net.IPSupport;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.net.StandardProtocolFamily;
+import java.nio.channels.DatagramChannel;
+
+import org.testng.annotations.Test;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.assertFalse;
+
+public class AddressesAfterDisconnect {
+
+ public static void main(String[] args) throws IOException {
+ new AddressesAfterDisconnect().execute();
+ }
+
+ @Test
+ public void execute() throws IOException {
+ IPSupport.throwSkippedExceptionIfNonOperational();
+ boolean preferIPv6 = Boolean.getBoolean("java.net.preferIPv6Addresses");
+
+ // test with default protocol family
+ try (DatagramChannel dc = DatagramChannel.open()) {
+ System.out.println("Test with default");
+ dc.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ test(dc);
+ test(dc);
+ }
+
+ if (IPSupport.hasIPv6()) {
+ // test with IPv6 only
+ System.out.println("Test with IPv6 only");
+ try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET6)) {
+ dc.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ test(dc);
+ test(dc);
+ }
+ }
+
+ if (IPSupport.hasIPv4() && !preferIPv6) {
+ // test with IPv4 only
+ System.out.println("Test with IPv4 only");
+ try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET)) {
+ dc.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ test(dc);
+ test(dc);
+ }
+ }
+ }
+
+ /**
+ * Connect DatagramChannel to a server, write a datagram and disconnect. Invoke
+ * a second or subsequent time with the same DatagramChannel instance to check
+ * that disconnect works as expected.
+ */
+ static void test(DatagramChannel dc) throws IOException {
+ SocketAddress local = dc.getLocalAddress();
+ try (DatagramChannel server = DatagramChannel.open()) {
+ server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ SocketAddress remote = server.getLocalAddress();
+ dc.connect(remote);
+ assertTrue(dc.isConnected());
+ // comment the following two lines on OS X to see JDK-8231259
+ assertEquals(dc.getLocalAddress(), local, "local address after connect");
+ assertEquals(dc.getRemoteAddress(), remote, "remote address after connect");
+ dc.disconnect();
+ assertFalse(dc.isConnected());
+ assertEquals(dc.getLocalAddress(), local, "local address after disconnect");
+ assertEquals(dc.getRemoteAddress(), null, "remote address after disconnect");
+ }
+ }
+
+}
--- a/test/jdk/java/nio/file/attribute/BasicFileAttributeView/SetTimesNanos.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/nio/file/attribute/BasicFileAttributeView/SetTimesNanos.java Fri Oct 11 12:08:01 2019 +0530
@@ -22,9 +22,9 @@
*/
/* @test
- * @bug 8181493
+ * @bug 8181493 8231174
* @summary Verify that nanosecond precision is maintained for file timestamps
- * @requires (os.family == "linux") | (os.family == "mac") | (os.family == "solaris")
+ * @requires (os.family == "linux") | (os.family == "mac") | (os.family == "solaris") | (os.family == "windows")
* @modules java.base/sun.nio.fs:+open
*/
@@ -40,14 +40,21 @@
import java.util.concurrent.TimeUnit;
public class SetTimesNanos {
+ private static final boolean IS_WINDOWS =
+ System.getProperty("os.name").startsWith("Windows");
+
public static void main(String[] args) throws Exception {
- // Check whether futimens() system call is supported
- Class unixNativeDispatcherClass = Class.forName("sun.nio.fs.UnixNativeDispatcher");
- Method futimensSupported = unixNativeDispatcherClass.getDeclaredMethod("futimensSupported");
- futimensSupported.setAccessible(true);
- if (!(boolean)futimensSupported.invoke(null)) {
- System.err.println("futimens() system call not supported; skipping test");
- return;
+ if (!IS_WINDOWS) {
+ // Check whether futimens() system call is supported
+ Class unixNativeDispatcherClass =
+ Class.forName("sun.nio.fs.UnixNativeDispatcher");
+ Method futimensSupported =
+ unixNativeDispatcherClass.getDeclaredMethod("futimensSupported");
+ futimensSupported.setAccessible(true);
+ if (!(boolean)futimensSupported.invoke(null)) {
+ System.err.println("futimens() not supported; skipping test");
+ return;
+ }
}
Path dirPath = Path.of("test");
@@ -56,7 +63,8 @@
System.out.format("FileStore: \"%s\" on %s (%s)%n",
dir, store.name(), store.type());
- Set<String> testedTypes = Set.of("apfs", "ext4", "xfs", "zfs");
+ Set<String> testedTypes = IS_WINDOWS ?
+ Set.of("NTFS") : Set.of("apfs", "ext4", "xfs", "zfs");
if (!testedTypes.contains(store.type())) {
System.err.format("%s not in %s; skipping test", store.type(), testedTypes);
return;
@@ -77,6 +85,11 @@
Files.getFileAttributeView(path, BasicFileAttributeView.class);
view.setTimes(pathTime, pathTime, null);
+ // Windows file time resolution is 100ns so truncate
+ if (IS_WINDOWS) {
+ timeNanos = 100L*(timeNanos/100L);
+ }
+
// Read attributes
BasicFileAttributes attrs =
Files.readAttributes(path, BasicFileAttributes.class);
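Because NTFS stores timestamps in 100 ns units, the test truncates the expected value on Windows before comparing. A minimal standalone sketch of writing and reading back a sub-millisecond timestamp under the same assumption follows; the file name and instant are illustrative.

    import java.nio.file.*;
    import java.nio.file.attribute.*;
    import java.time.Instant;

    // Editorial sketch, not part of the patch: set a nanosecond timestamp,
    // truncating to 100 ns on Windows as the test does.
    public class FileTimeSketch {
        public static void main(String[] args) throws Exception {
            Path p = Files.createTempFile("times", ".tmp");
            long nanos = 123_456_789L;
            if (System.getProperty("os.name").startsWith("Windows")) {
                nanos = 100L * (nanos / 100L);      // Windows file time resolution is 100 ns
            }
            FileTime t = FileTime.from(Instant.ofEpochSecond(1_600_000_000L, nanos));
            Files.getFileAttributeView(p, BasicFileAttributeView.class).setTimes(t, t, null);
            BasicFileAttributes attrs = Files.readAttributes(p, BasicFileAttributes.class);
            System.out.println("last modified: " + attrs.lastModifiedTime());
            Files.delete(p);
        }
    }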
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/security/Provider/GetServiceRace.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231387
+ * @library ../testlibrary
+ * @summary make sure getService() avoids a race
+ * @author Tianmin Shi
+ */
+
+import java.security.Provider;
+
+public class GetServiceRace {
+
+ private static final Provider testProvider;
+ static {
+ testProvider = new Provider("MyProvider", 1.0, "test") {
+ };
+ testProvider.put("CertificateFactory.Fixed", "MyCertificateFactory");
+ }
+
+ private static final int NUMBER_OF_RETRIEVERS = 3;
+ private static final int TEST_TIME_MS = 1000;
+
+ public static boolean testFailed = false;
+
+ public static void main(String[] args) throws Exception {
+ Updater updater = new Updater();
+ updater.start();
+ Retriever [] retrievers = new Retriever[NUMBER_OF_RETRIEVERS];
+ for (int i=0; i<retrievers.length; i++) {
+ retrievers[i] = new Retriever();
+ retrievers[i].start();
+ }
+ Thread.sleep(TEST_TIME_MS);
+ System.out.println("Interrupt");
+ updater.interrupt();
+ updater.join();
+ for (int i=0; i<retrievers.length; i++) {
+ retrievers[i].interrupt();
+ retrievers[i].join();
+ }
+ System.out.println("Done");
+ if (testFailed) {
+ throw new Exception("Test Failed");
+ }
+ System.out.println("Test Passed");
+ }
+
+ private static class Updater extends Thread {
+ @Override
+ public void run() {
+ while (!isInterrupted()) {
+ testProvider.put("CertificateFactory.Added", "MyCertificateFactory");
+ }
+ System.out.println("Updater stopped");
+ }
+ }
+
+ private static class Retriever extends Thread {
+ @Override
+ public void run() {
+ while (!isInterrupted()) {
+ Provider.Service service = testProvider.getService("CertificateFactory", "Fixed");
+ if (service == null) {
+ if (!testFailed) {
+ System.err.println("CertificateFactory.Fixed is NULL");
+ testFailed = true;
+ }
+ } else {
+ //System.out.println("CertificateFactory.Fixed is good");
+ }
+ }
+ System.out.println("Retriever stopped");
+ }
+ }
+}
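The race test above hammers getService() from several threads while another thread keeps mutating the provider. A minimal single-threaded sketch of the lookup itself follows; the provider, service type and class names are illustrative.

    import java.security.Provider;

    // Editorial sketch, not part of the patch: register a service entry on an
    // anonymous Provider subclass and look it up by type and algorithm.
    public class GetServiceSketch {
        public static void main(String[] args) {
            Provider p = new Provider("MyProvider", 1.0, "test") { };
            p.put("CertificateFactory.Fixed", "MyCertificateFactory");
            Provider.Service s = p.getService("CertificateFactory", "Fixed");
            System.out.println(s.getType() + "." + s.getAlgorithm()
                    + " -> " + s.getClassName());
        }
    }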
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/text/Format/DateFormat/SimpleDateFormatPatternTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 4326988 6990146 8231213
+ * @summary test SimpleDateFormat, check its pattern in the constructor
+ * @run testng/othervm SimpleDateFormatPatternTest
+ */
+import java.lang.IllegalArgumentException;
+import java.text.DateFormat;
+import java.text.DateFormatSymbols;
+import java.text.SimpleDateFormat;
+import java.util.Locale;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+public class SimpleDateFormatPatternTest {
+ private static String[] validPat = {
+ "yyyy-MM-dd h.mm.ss.a z",
+ "yyyy'M'd' ahh'mm'ss' z",
+ "dd MMMM yyyy hh:mm:ss",
+ "d MMM yy HH:mm:ss",
+ "dd/MM/yyyy HH:mm:ss",
+ "d' / 'MMMM' / 'yyyy HH:mm:ss z",
+ "d.M.yyyy H:mm:ss",
+ "d' de 'MMMM' de 'yyyy H'h'm'min's's' z",
+ "dd. MMMM yyyy HH:mm:ss z",
+ "d-M-yyyy H:mm:ss",
+ "EEEE''d MMMM G yyyy, H' 'm' 'ss' '",
+ "dd.MMM.yyyy HH:mm:ss",
+ "yy-MM-dd h:mm:ss.a",
+ "d' de 'MMMM' de 'yyyy hh:mm:ss a z",
+ "EEEE d MMMM yyyy H' h 'mm' min 'ss' s 'z",
+ "d MMMM yyyy H:mm:ss",
+ "d/MM/yyyy hh:mm:ss a",
+ "EEEE, d, MMMM yyyy HH:mm:ss z",
+ "EEEE, d. MMMM yyyy HH.mm.' h' z",
+ "EEEE, d' / 'MMMM' / 'yyyy HH:mm:ss z",
+ "d/MM/yyyy HH:mm:ss",
+ "d MMMM yyyy H:mm:ss z",
+ "MMMM d, yyyy h:mm:ss a z",
+ "yyyy. MMMM d. H:mm:ss z",
+ "d' de 'MMMM' de 'yyyy H:mm:ss z",
+ "EEEE, MMMM d, yyyy h:mm:ss a z",
+ "d/M/yyyy H:mm:ss",
+ "d-MMM-yy HH:mm:ss",
+ "EEEE d' de 'MMMM' de 'yyyy hh:mm:ss a z",
+ "yyyy'M'd' ahh'mm'ss'",
+ "yyyy'MM'dd' EEEE ahh'mm'ss'",
+ "EEEE, d MMMM yyyy HH:mm:ss z",
+
+ //6990146: 'Y' for year; 'X' for time zone; 'u' for day number of the week
+ "d/M/YYYY H:mm:ss",
+ "d-MMM-YY HH:mm:ss",
+ "EEEE d' de 'MMMM' de 'YYYY hh:mm:ss a X",
+ "YYYY M d ahh:mm:ss",
+ "YYYY MM dd EEEE u ahh/mm/ss",
+ "EEEE, u, d MMMM YYYY HH:mm:ss X",
+ "YYYY M d Z ahh mm ss",
+ "YYYY-MM-dd EEEE u ahh-mm-ss",
+
+ //*added for sr-Latn*
+ "EEEE, dd. MMMM y. HH.mm.ss zzzz",
+ "dd. MMMM y. HH.mm.ss z",
+ "dd.MM.y. HH.mm.ss",
+ "d.M.yy. HH.mm"
+ };
+
+ private static String[] invalidPat = {
+ "yyyy'M'd' ahh:mm:ss",
+ "EEEe d MM MM yyyy HH' h 'mm zzzZ",
+ "d MMMM\\ yyyy, H' 'm' 'g",
+ "EEEE d' @# MMMMde 'yyys HHH'mm z",
+ "yyyy'MMe 2 #dd' EEEEahh'mm'ss' z,z",
+ "yyyy.M.d H;mm.ses",
+ "EEEe, d MMMM yyyy h:mm:ss a z",
+ "EEEE, MMMM d, 'y y y y h:mm:ss 'o''clock' a z",
+ "dd MMMM yyyy 0HHcl:mm:ss z",
+ "d.M_M_y.yy1yy HextH:mm|45:",
+ "d,D MMMTTTTTTTTTKM yy|+yy HH:m m:ss z",
+ "d-MDtM M-yy H:mm:ss",
+ "yyyy/M///m/nM/d Dd H:m$m:s s",
+ "EEEE, dd. MMMM yyyy HH:m'''m' Uhr 'z",
+ //6990146
+ "EEEE d' de 'MMMM' de 'YYYY hh:mm:ss a x",
+ "EEEE, U, d MMMM YYYY HH:mm:ss Z"
+ };
+
+ private static Locale[] locales = DateFormat.getAvailableLocales();
+ private static Object[][] dfAllLocalesObj = createAllLocales();
+ private static Object[][] invalidPatObj = createPatternObj(invalidPat);
+ private static Object[][] validPatObj = createPatternObj(validPat);
+
+ private static Object[][] createAllLocales() {
+ Object[][] objArray = new Object[locales.length][];
+ for (int i = 0; i < locales.length; i++) {
+ objArray[i] = new Object[1];
+ objArray[i][0] = locales[i];
+ }
+ return objArray;
+ }
+
+ private static Object[][] createPatternObj(String[] pattern){
+ Object[][] objArray = new Object[locales.length * pattern.length][];
+ int k = 0;
+ for (int i = 0; i < locales.length; i++) {
+ for (int j = 0; j < pattern.length; j++) {
+ objArray[k] = new Object[2];
+ objArray[k][0] = pattern[j];
+ objArray[k][1] = locales[i];
+ k = k + 1;
+ }
+ }
+ return objArray;
+ }
+
+ @DataProvider(name = "dfAllLocalesObj")
+ Object[][] dfAllLocalesObj() {
+ return dfAllLocalesObj;
+ }
+
+ @DataProvider(name = "invalidPatternObj")
+ Object[][] invalidPatternObj() {
+ return invalidPatObj;
+ }
+
+ @DataProvider(name = "validPatternObj")
+ Object[][] validPatternObj() {
+ return validPatObj;
+ }
+
+ //check Constructors for invalid pattern
+ @Test(dataProvider = "invalidPatternObj",
+ expectedExceptions = IllegalArgumentException.class)
+ public void testIllegalArgumentException1(String pattern, Locale loc)
+ throws IllegalArgumentException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(pattern);
+ }
+
+ @Test(dataProvider = "invalidPatternObj",
+ expectedExceptions = IllegalArgumentException.class)
+ public void testIllegalArgumentException2(String pattern, Locale loc)
+ throws IllegalArgumentException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(pattern, new DateFormatSymbols());
+ }
+
+ @Test(dataProvider = "invalidPatternObj",
+ expectedExceptions = IllegalArgumentException.class)
+ public void testIllegalArgumentException3 (String pattern, Locale loc)
+ throws IllegalArgumentException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(pattern, Locale.getDefault());
+ }
+
+ @Test(dataProvider = "invalidPatternObj",
+ expectedExceptions = IllegalArgumentException.class)
+ public void testIllegalArgumentException4(String pattern, Locale loc)
+ throws IllegalArgumentException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat().applyPattern(pattern);
+ }
+
+    // Check constructors and applyPattern with a null pattern
+ @Test(dataProvider = "dfAllLocalesObj",
+ expectedExceptions = NullPointerException.class)
+ public void testNullPointerException1(Locale loc)
+ throws NullPointerException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(null);
+ }
+
+ @Test(dataProvider = "dfAllLocalesObj",
+ expectedExceptions = NullPointerException.class)
+ public void testNullPointerException2(Locale loc)
+ throws NullPointerException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(null, new DateFormatSymbols());
+ }
+
+ @Test(dataProvider = "dfAllLocalesObj",
+ expectedExceptions = NullPointerException.class)
+ public void testNullPointerException3(Locale loc)
+ throws NullPointerException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(null, Locale.getDefault());
+ }
+
+ @Test(dataProvider = "dfAllLocalesObj",
+ expectedExceptions = NullPointerException.class)
+ public void testNullPointerException4(Locale loc)
+ throws NullPointerException {
+ Locale.setDefault(loc);
+ new SimpleDateFormat().applyPattern(null);
+ }
+
+    // Check constructors and applyPattern with a valid pattern
+    @Test(dataProvider = "validPatternObj")
+    public void testValidPattern(String pattern, Locale loc) {
+ Locale.setDefault(loc);
+ new SimpleDateFormat(pattern);
+ new SimpleDateFormat(pattern, new DateFormatSymbols());
+ new SimpleDateFormat(pattern, Locale.getDefault());
+ new SimpleDateFormat().applyPattern(pattern);
+ }
+}
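The constructor and applyPattern behaviour that the data providers above exercise can be reproduced with a few lines of plain Java. This is only an illustrative sketch (class name and sample patterns chosen here, not taken from the test):

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Locale;

    public class PatternSketch {
        public static void main(String[] args) {
            // A valid pattern formats without complaint.
            System.out.println(
                new SimpleDateFormat("dd/MM/yyyy HH:mm:ss", Locale.UK).format(new Date()));

            // An illegal pattern letter ('e' below) is rejected at construction time.
            try {
                new SimpleDateFormat("yyyy.M.d H;mm.ses", Locale.UK);
            } catch (IllegalArgumentException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }

            // A null pattern is rejected with NullPointerException.
            try {
                new SimpleDateFormat().applyPattern(null);
            } catch (NullPointerException expected) {
                System.out.println("null pattern rejected");
            }
        }
    }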
--- a/test/jdk/java/util/RandomAccess/Basic.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/RandomAccess/Basic.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,111 +21,148 @@
* questions.
*/
-/*
+/**
* @test
- * @bug 4327164
+ * @bug 4327164 8229338
* @summary Basic test for new RandomAccess interface
+ * @run testng Basic
*/
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-import java.util.RandomAccess;
-import java.util.Vector;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Function;
+import java.util.function.Supplier;
public class Basic {
- public static void main(String[] args) throws Exception {
- List a0 = Arrays.asList(new String[] { "a", "b", "c" });
- List a[] = { a0, new ArrayList(a0), new LinkedList(a0),
- new Vector(a0) };
- if (!(a[0] instanceof RandomAccess))
- throw new Exception("Arrays.asList doesn't implement RandomAccess");
- if (!(a[1] instanceof RandomAccess))
- throw new Exception("ArrayList doesn't implement RandomAccess");
- if (a[2] instanceof RandomAccess)
- throw new Exception("LinkedList implements RandomAccess");
- if (!(a[3] instanceof RandomAccess))
- throw new Exception("Vector doesn't implement RandomAccess");
-
- for (int i = 0; i < a.length; i++) {
- List t = a[i];
- List ut = Collections.unmodifiableList(t);
- List st = Collections.synchronizedList(t);
+    /*
+     * Lists under test, and whether each is expected to implement RandomAccess
+     */
+ @DataProvider(name = "testLists")
+ public Object[][] testData() {
+ var intArray = new Integer[100];
+        var stack = new Stack<Integer>();
+ var random = new Random();
+ for (int i = 0; i < 100; i++) {
+ var r = random.nextInt(100);
+ stack.push(r);
+ intArray[i] = r;
+ }
+ List<Integer> list = Arrays.asList(intArray);
+ return new Object[][]{
+ {list, true, "Arrays.asList"},
+ {stack, true, "Stack"},
+ {new ArrayList<>(list), true, "ArrayList"},
+ {new LinkedList<>(list), false, "LinkedList"},
+ {new Vector<>(list), true, "Vector"},
+ {new CopyOnWriteArrayList<>(list), true, "CopyOnWriteArrayList"}
+ };
+ }
- boolean random = t instanceof RandomAccess;
- if ((ut instanceof RandomAccess) != random)
- throw new Exception(
- "Unmodifiable fails to preserve RandomAccess: " + i);
- if ((st instanceof RandomAccess) != random)
- throw new Exception(
- "Synchronized fails to preserve RandomAccess: " + i);
+ @Test(dataProvider = "testLists")
+ public void testRandomAccess(List<Integer> list, boolean expectedRA, String failMsg) {
+
+ var actualRA = list instanceof RandomAccess;
+ assertEquals(actualRA, expectedRA, failMsg);
+
+ List<Integer> unmodList = Collections.unmodifiableList(list);
+ List<Integer> syncList = Collections.synchronizedList(list);
+ assertEquals((unmodList instanceof RandomAccess), actualRA,
+ "Unmodifiable fails to preserve RandomAccess");
+ assertEquals((syncList instanceof RandomAccess), actualRA,
+ "Synchronized fails to preserve RandomAccess");
- while (t.size() > 0) {
- t = t.subList(0, t.size() - 1);
- if ((t instanceof RandomAccess) != random)
- throw new Exception(
- "SubList fails to preserve RandomAccess: " + i
- + ", " + t.size());
+ while (list.size() > 0) {
+ list = list.subList(0, list.size() - 1);
+ assertEquals((list instanceof RandomAccess), actualRA,
+ "SubList fails to preserve RandomAccess: " + list.size());
- ut = ut.subList(0, ut.size() - 1);
- if ((ut instanceof RandomAccess) != random)
- throw new Exception(
- "SubList(unmodifiable) fails to preserve RandomAccess: "
- + i + ", " + ut.size());
+ unmodList = unmodList.subList(0, unmodList.size() - 1);
+ assertEquals((unmodList instanceof RandomAccess), actualRA,
+ "SubList(unmodifiable) fails to preserve RandomAccess: "
+ + unmodList.size());
+
+ syncList = syncList.subList(0, syncList.size() - 1);
+ assertEquals((syncList instanceof RandomAccess), actualRA,
+ "SubList(synchronized) fails to preserve RandomAccess: "
+ + syncList.size());
+ }
+ }
- st = st.subList(0, st.size() - 1);
- if ((st instanceof RandomAccess) != random)
- throw new Exception(
- "SubList(synchronized) fails to preserve RandomAccess: "
- + i + ", " + st.size());
- }
- }
+ @Test(dataProvider = "testLists")
+ public void testListCopy(List<Integer> list, boolean expectedRA, String failMsg) {
+ ArrayList testCollection = new ArrayList<>(Collections.nCopies(100, 0));
+ // Test that copy works on random & sequential access
+ Collections.copy(list, testCollection);
+ assertEquals(list, testCollection, "Copy failed: " + failMsg);
+ }
+
+ @Test(dataProvider = "testLists")
+ public void testListFill(List<Integer> list, boolean expectedRA, String failMsg) {
+ ArrayList testCollection = new ArrayList<>(Collections.nCopies(100, 0));
+        // Test that fill works on random & sequential access
+ Collections.fill(list, 0);
+ assertEquals(list, testCollection, "Fill failed: " + failMsg);
+ }
- // Test that shuffle works the same on random and sequential access
- List al = new ArrayList();
- for (int j = 0; j < 100; j++)
- al.add(Integer.valueOf(2 * j));
- List ll = new LinkedList(al);
- Random r1 = new Random(666), r2 = new Random(666);
- for (int i = 0; i < 100; i++) {
- Collections.shuffle(al, r1);
- Collections.shuffle(ll, r2);
- if (!al.equals(ll))
- throw new Exception("Shuffle failed: " + i);
- }
+ /*
+ * Test that shuffle and binarySearch work the same on random and sequential access lists.
+ */
+ @DataProvider(name = "testFactoryLists")
+ public Object[][] testDataFactory() {
+ return new Object[][]{
+ {"ArrayList -> LinkedList", supplier(ArrayList::new), copyCtor(LinkedList::new)},
+ {"CopyOnWriteArrayList -> Stack", supplier(CopyOnWriteArrayList::new),
+                        copyCtor((list) -> { var s = new Stack<Integer>(); s.addAll(list); return s; })}
+ };
+ }
+
+ private Supplier<List<Integer>> supplier(Supplier<List<Integer>> supplier) {
+ return supplier;
+ }
- // Test that fill works on random & sequential access
- List gumbyParade = Collections.nCopies(100, "gumby");
- Collections.fill(al, "gumby");
- if (!al.equals(gumbyParade))
- throw new Exception("ArrayList fill failed");
- Collections.fill(ll, "gumby");
- if (!ll.equals(gumbyParade))
- throw new Exception("LinkedList fill failed");
+ private Function<List<Integer>, List<Integer>> copyCtor(Function<List<Integer>, List<Integer>> ctor) {
+ return ctor;
+ }
+
+ @Test(dataProvider = "testFactoryLists")
+ public void testListShuffle(String description, Supplier<List<Integer>> randomAccessListSupplier,
+ Function<List<Integer>, List<Integer>> otherListFactory) {
- // Test that copy works on random & sequential access
- List pokeyParade = Collections.nCopies(100, "pokey");
- Collections.copy(al, pokeyParade);
- if (!al.equals(pokeyParade))
- throw new Exception("ArrayList copy failed");
- Collections.copy(ll, pokeyParade);
- if (!ll.equals(pokeyParade))
- throw new Exception("LinkedList copy failed");
+        // e.g. ArrayList<Integer> al = new ArrayList<>();
+ List<Integer> l1 = randomAccessListSupplier.get();
+ for (int j = 0; j < 100; j++) {
+ l1.add(Integer.valueOf(2 * j));
+ }
+        // e.g. List<Integer> ll = new LinkedList<>(al);
+ List<Integer> l2 = otherListFactory.apply(l1);
+ for (int i = 0; i < 100; i++) {
+ Collections.shuffle(l1, new Random(666));
+ Collections.shuffle(l2, new Random(666));
+ assertEquals(l1, l2, "Shuffle failed: " + description);
+ }
+ }
- // Test that binarySearch works the same on random & sequential access
- al = new ArrayList();
- for (int i = 0; i < 10000; i++)
- al.add(Integer.valueOf(2 * i));
- ll = new LinkedList(al);
+ @Test(dataProvider = "testFactoryLists")
+ public void testListBinarySearch(String description, Supplier<List<Integer>> randomAccessListSupplier,
+ Function<List<Integer>, List<Integer>> otherListFactory) {
+
+        // e.g. ArrayList<Integer> al = new ArrayList<>();
+ List<Integer> l1 = randomAccessListSupplier.get();
+ for (int i = 0; i < 10000; i++) {
+ l1.add(Integer.valueOf(2 * i));
+ }
+        // e.g. List<Integer> ll = new LinkedList<>(al);
+ List<Integer> l2 = otherListFactory.apply(l1);
for (int i = 0; i < 500; i++) {
- Integer key = Integer.valueOf(r1.nextInt(20000));
- if (Collections.binarySearch(al, key) != Collections
- .binarySearch(ll, key))
- throw new Exception("Binary search failed: " + i);
+ Integer key = Integer.valueOf(new Random(666).nextInt(20000));
+ assertEquals(Collections.binarySearch(l1, key), Collections
+ .binarySearch(l2, key), "Binary search failed: " + description);
}
}
}
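The invariant the rewritten test asserts, namely that unmodifiable, synchronized and subList views report RandomAccess exactly when the backing list does, can be observed directly. A minimal sketch (class name ours, not part of the test):

    import java.util.*;

    public class RandomAccessSketch {
        public static void main(String[] args) {
            List<Integer> array = new ArrayList<>(List.of(1, 2, 3));
            List<Integer> linked = new LinkedList<>(array);

            // Wrappers preserve the RandomAccess marker of the backing list.
            System.out.println(Collections.unmodifiableList(array) instanceof RandomAccess);  // true
            System.out.println(Collections.synchronizedList(linked) instanceof RandomAccess); // false

            // subList views do the same.
            System.out.println(array.subList(0, 2) instanceof RandomAccess);  // true
            System.out.println(linked.subList(0, 2) instanceof RandomAccess); // false
        }
    }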
--- a/test/jdk/java/util/concurrent/tck/JSR166TestCase.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/concurrent/tck/JSR166TestCase.java Fri Oct 11 12:08:01 2019 +0530
@@ -76,6 +76,7 @@
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.management.ManagementFactory;
+import java.lang.management.LockInfo;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.Constructor;
@@ -270,6 +271,9 @@
}
}
+ private static final ThreadMXBean THREAD_MXBEAN
+ = ManagementFactory.getThreadMXBean();
+
/**
* The scaling factor to apply to standard delays used in tests.
* May be initialized from any of:
@@ -1157,9 +1161,8 @@
}
}
- ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
System.err.println("------ stacktrace dump start ------");
- for (ThreadInfo info : threadMXBean.dumpAllThreads(true, true))
+ for (ThreadInfo info : THREAD_MXBEAN.dumpAllThreads(true, true))
if (threadOfInterest(info))
System.err.print(info);
System.err.println("------ stacktrace dump end ------");
@@ -1188,6 +1191,17 @@
}
/**
+ * Returns the thread's blocker's class name, if any, else null.
+ */
+ String blockerClassName(Thread thread) {
+ ThreadInfo threadInfo; LockInfo lockInfo;
+ if ((threadInfo = THREAD_MXBEAN.getThreadInfo(thread.getId(), 0)) != null
+ && (lockInfo = threadInfo.getLockInfo()) != null)
+ return lockInfo.getClassName();
+ return null;
+ }
+
+ /**
* Checks that future.get times out, with the default timeout of
* {@code timeoutMillis()}.
*/
@@ -1486,6 +1500,14 @@
}
/**
+ * Returns a new started daemon Thread running the given action,
+ * wrapped in a CheckedRunnable.
+ */
+ Thread newStartedThread(Action action) {
+ return newStartedThread(checkedRunnable(action));
+ }
+
+ /**
* Waits for the specified time (in milliseconds) for the thread
* to terminate (using {@link Thread#join(long)}), else interrupts
* the thread (in the hope that it may terminate later) and fails.
@@ -1532,6 +1554,13 @@
}
}
+ Runnable checkedRunnable(Action action) {
+ return new CheckedRunnable() {
+ public void realRun() throws Throwable {
+ action.run();
+ }};
+ }
+
public abstract class ThreadShouldThrow extends Thread {
protected abstract void realRun() throws Throwable;
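The new blockerClassName helper is built on ThreadMXBean.getThreadInfo and ThreadInfo.getLockInfo. A rough standalone sketch of the same inspection follows; the thread name, the sleep and the printouts are illustrative only (the tests poll the thread state instead of sleeping), and under a security manager this needs the ManagementPermission("monitor") granted by the tck.policy change below.

    import java.lang.management.LockInfo;
    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;
    import java.util.concurrent.locks.ReentrantLock;

    public class BlockerSketch {
        public static void main(String[] args) throws InterruptedException {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            ReentrantLock lock = new ReentrantLock();
            lock.lock();
            Thread waiter = new Thread(lock::lock, "waiter");
            waiter.start();
            Thread.sleep(100); // crude; the tests poll the thread state instead

            ThreadInfo info = mx.getThreadInfo(waiter.getId(), 0);
            LockInfo blocker = (info == null) ? null : info.getLockInfo();
            // Typically prints ...ReentrantLock$NonfairSync
            System.out.println(blocker == null ? "no blocker" : blocker.getClassName());

            lock.unlock();
            waiter.join();
        }
    }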
--- a/test/jdk/java/util/concurrent/tck/ReentrantLockTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/concurrent/tck/ReentrantLockTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -39,10 +39,13 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
+import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import junit.framework.Test;
@@ -1222,4 +1225,65 @@
assertFalse(thread.isAlive());
}
}
+
+ /**
+ * ThreadMXBean reports the blockers that we expect.
+ */
+ public void testBlockers() {
+ if (!testImplementationDetails) return;
+ final boolean fair = randomBoolean();
+ final boolean timedAcquire = randomBoolean();
+ final boolean timedAwait = randomBoolean();
+ final String syncClassName = fair
+ ? "ReentrantLock$FairSync"
+ : "ReentrantLock$NonfairSync";
+ final String conditionClassName
+ = "AbstractQueuedSynchronizer$ConditionObject";
+ final Thread.State expectedAcquireState = timedAcquire
+ ? Thread.State.TIMED_WAITING
+ : Thread.State.WAITING;
+ final Thread.State expectedAwaitState = timedAwait
+ ? Thread.State.TIMED_WAITING
+ : Thread.State.WAITING;
+ final Lock lock = new ReentrantLock(fair);
+ final Condition condition = lock.newCondition();
+ final AtomicBoolean conditionSatisfied = new AtomicBoolean(false);
+ lock.lock();
+ final Thread thread = newStartedThread((Action) () -> {
+ if (timedAcquire)
+ lock.tryLock(LONGER_DELAY_MS, MILLISECONDS);
+ else
+ lock.lock();
+ while (!conditionSatisfied.get())
+ if (timedAwait)
+ condition.await(LONGER_DELAY_MS, MILLISECONDS);
+ else
+ condition.await();
+ });
+ Callable<Boolean> waitingForLock = () -> {
+ String className;
+ return thread.getState() == expectedAcquireState
+ && (className = blockerClassName(thread)) != null
+ && className.endsWith(syncClassName);
+ };
+ waitForThreadToEnterWaitState(thread, waitingForLock);
+
+ lock.unlock();
+ Callable<Boolean> waitingForCondition = () -> {
+ String className;
+ return thread.getState() == expectedAwaitState
+ && (className = blockerClassName(thread)) != null
+ && className.endsWith(conditionClassName);
+ };
+ waitForThreadToEnterWaitState(thread, waitingForCondition);
+
+ // politely release the waiter
+ conditionSatisfied.set(true);
+ lock.lock();
+ try {
+ condition.signal();
+ } finally { lock.unlock(); }
+
+ awaitTermination(thread);
+ }
}
--- a/test/jdk/java/util/concurrent/tck/ReentrantReadWriteLockTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/concurrent/tck/ReentrantReadWriteLockTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -38,6 +38,7 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
+import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
@@ -1707,4 +1708,64 @@
assertTrue(lock.writeLock().toString().contains("Unlocked"));
}
+ /**
+ * ThreadMXBean reports the blockers that we expect.
+ */
+ public void testBlockers() {
+ if (!testImplementationDetails) return;
+ final boolean fair = randomBoolean();
+ final boolean timedAcquire = randomBoolean();
+ final boolean timedAwait = randomBoolean();
+ final String syncClassName = fair
+ ? "ReentrantReadWriteLock$FairSync"
+ : "ReentrantReadWriteLock$NonfairSync";
+ final String conditionClassName
+ = "AbstractQueuedSynchronizer$ConditionObject";
+ final Thread.State expectedAcquireState = timedAcquire
+ ? Thread.State.TIMED_WAITING
+ : Thread.State.WAITING;
+ final Thread.State expectedAwaitState = timedAwait
+ ? Thread.State.TIMED_WAITING
+ : Thread.State.WAITING;
+ final Lock lock = new ReentrantReadWriteLock(fair).writeLock();
+ final Condition condition = lock.newCondition();
+ final AtomicBoolean conditionSatisfied = new AtomicBoolean(false);
+ lock.lock();
+ final Thread thread = newStartedThread((Action) () -> {
+ if (timedAcquire)
+ lock.tryLock(LONGER_DELAY_MS, MILLISECONDS);
+ else
+ lock.lock();
+ while (!conditionSatisfied.get())
+ if (timedAwait)
+ condition.await(LONGER_DELAY_MS, MILLISECONDS);
+ else
+ condition.await();
+ });
+ Callable<Boolean> waitingForLock = () -> {
+ String className;
+ return thread.getState() == expectedAcquireState
+ && (className = blockerClassName(thread)) != null
+ && className.endsWith(syncClassName);
+ };
+ waitForThreadToEnterWaitState(thread, waitingForLock);
+
+ lock.unlock();
+ Callable<Boolean> waitingForCondition = () -> {
+ String className;
+ return thread.getState() == expectedAwaitState
+ && (className = blockerClassName(thread)) != null
+ && className.endsWith(conditionClassName);
+ };
+ waitForThreadToEnterWaitState(thread, waitingForCondition);
+
+ // politely release the waiter
+ conditionSatisfied.set(true);
+ lock.lock();
+ try {
+ condition.signal();
+ } finally { lock.unlock(); }
+
+ awaitTermination(thread);
+ }
}
--- a/test/jdk/java/util/concurrent/tck/tck.policy Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/concurrent/tck/tck.policy Fri Oct 11 12:08:01 2019 +0530
@@ -12,4 +12,6 @@
permission java.lang.RuntimePermission "accessDeclaredMembers";
permission java.io.FilePermission "<<ALL FILES>>", "read";
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+ // Allows test methods to inspect test thread state
+ permission java.lang.management.ManagementPermission "monitor";
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/stream/test/org/openjdk/tests/java/util/stream/CollectorExample.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.tests.java.util.stream;
+
+/*
+ * THE CONTENTS OF THIS FILE HAVE TO BE IN SYNC WITH THE EXAMPLES USED
+ * IN THE JAVADOC.
+ *
+ * @test
+ * @bug 8231161
+ * @compile CollectorExample.java
+ * @summary Compilation test only. Compile code snippets from
+ * java.util.stream.Collector class-level API documentation
+ */
+
+import java.util.*;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collector;
+import java.util.stream.Collectors;
+
+public class CollectorExample {
+
+ // Empty helper classes
+
+ class Widget {
+ }
+
+ class Employee {
+ public int getSalary() {
+ return 0; // money isn't everything
+ }
+
+ public Department getDepartment() {
+ return new Department();
+ }
+ }
+
+ class Department {
+ }
+
+ <T, A, R> void testSnippet1(Collector<T, A, R> collector, T t1, T t2) {
+
+ Supplier<A> supplier = collector.supplier();
+ BiConsumer<A, T> accumulator = collector.accumulator();
+ BinaryOperator<A> combiner = collector.combiner();
+ Function<A, R> finisher = collector.finisher();
+
+ // Example start
+ A a1 = supplier.get();
+ accumulator.accept(a1, t1);
+ accumulator.accept(a1, t2);
+ R r1 = finisher.apply(a1);
+
+ A a2 = supplier.get();
+ accumulator.accept(a2, t1);
+ A a3 = supplier.get();
+ accumulator.accept(a3, t2);
+ R r2 = finisher.apply(combiner.apply(a2, a3));
+ }
+
+ void testSnippet2() {
+ Collector<Widget, ?, TreeSet<Widget>> intoSet =
+ Collector.of(TreeSet::new, TreeSet::add,
+ (left, right) -> { left.addAll(right); return left; });
+ }
+
+ <T, A, R> void testSnippet3(Collector<T, A, R> collector, Collection<T> data) {
+ A container = collector.supplier().get();
+ for (T t : data)
+ collector.accumulator().accept(container, t);
+ collector.finisher().apply(container);
+ }
+
+ void testSnippet4and5() {
+ Collector<Employee, ?, Integer> summingSalaries
+ = Collectors.summingInt(Employee::getSalary);
+
+ Collector<Employee, ?, Map<Department, Integer>> summingSalariesByDept
+ = Collectors.groupingBy(Employee::getDepartment, summingSalaries);
+ }
+}
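CollectorExample only needs to compile, but the equivalence it encodes, accumulating everything into one container versus splitting, combining and then finishing, can be observed with a concrete collector. A small sketch using Collectors.toList(), not part of the patch:

    import java.util.List;
    import java.util.stream.Collector;
    import java.util.stream.Collectors;

    public class CollectorContractSketch {
        public static void main(String[] args) {
            Collector<String, ?, List<String>> c = Collectors.toList();
            System.out.println(sequential(c, "a", "b").equals(split(c, "a", "b"))); // true
        }

        // Accumulate both elements into a single container, then finish.
        static <T, A, R> R sequential(Collector<T, A, R> c, T t1, T t2) {
            A a = c.supplier().get();
            c.accumulator().accept(a, t1);
            c.accumulator().accept(a, t2);
            return c.finisher().apply(a);
        }

        // Accumulate into two containers, combine them, then finish.
        static <T, A, R> R split(Collector<T, A, R> c, T t1, T t2) {
            A a1 = c.supplier().get();
            c.accumulator().accept(a1, t1);
            A a2 = c.supplier().get();
            c.accumulator().accept(a2, t2);
            return c.finisher().apply(c.combiner().apply(a1, a2));
        }
    }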
--- a/test/jdk/java/util/zip/FlaterTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/java/util/zip/FlaterTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,11 @@
/**
* @test
- * @bug 6348045 6341887
+ * @bug 6348045 6341887 8231770
* @summary GZipOutputStream/InputStream goes critical(calls JNI_Get*Critical)
- * and causes slowness. This test uses Deflater and Inflater directly.
+ * and causes slowness. This test uses Deflater and Inflater directly.
* @key randomness
+ * @run main/othervm -Xcheck:jni FlaterTest
*/
import java.nio.*;
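The direct Deflater/Inflater usage mentioned in the summary boils down to a compress/decompress round trip along these lines; a standalone sketch with an arbitrary input string and buffer size, not the test itself:

    import java.util.zip.DataFormatException;
    import java.util.zip.Deflater;
    import java.util.zip.Inflater;

    public class FlaterSketch {
        public static void main(String[] args) throws DataFormatException {
            byte[] input = "hello, zlib".getBytes();

            // Compress with Deflater...
            Deflater deflater = new Deflater();
            deflater.setInput(input);
            deflater.finish();
            byte[] compressed = new byte[100];
            int clen = deflater.deflate(compressed);
            deflater.end();

            // ...and decompress with Inflater.
            Inflater inflater = new Inflater();
            inflater.setInput(compressed, 0, clen);
            byte[] restored = new byte[input.length];
            int rlen = inflater.inflate(restored);
            inflater.end();

            System.out.println(new String(restored, 0, rlen)); // hello, zlib
        }
    }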
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/javax/management/mxbean/ThreadStartTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231666
+ * @summary Test checks that new threads can be started successfully while the thread
+ * table introduced in JDK-8185005 is growing. The test enables the thread table by calling
+ * ThreadMXBean.getThreadInfo() and then creates a number of threads to force the thread
+ * table to grow.
+ *
+ * @run main ThreadStartTest
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadMXBean;
+
+public class ThreadStartTest {
+ public static void main(String[] args) {
+
+ ThreadMXBean mbean = ManagementFactory.getThreadMXBean();
+ // Enable thread table
+ mbean.getThreadInfo(Thread.currentThread().getId());
+
+ // Create a large number of threads to make the thread table grow
+ for (int i = 0; i < 1000; i++) {
+ Thread t = new Thread(() -> {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ex) {
+ }
+ });
+ t.start();
+ }
+ }
+}
--- a/test/jdk/jdk/jfr/jcmd/TestJcmdConfigure.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/jdk/jfr/jcmd/TestJcmdConfigure.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
package jdk.jfr.jcmd;
+import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
@@ -56,8 +57,8 @@
private static final String SAMPLE_THREADS = "samplethreads";
private static final String UNSUPPORTED_OPTION = "unsupportedoption";
- private static final String REPOSITORYPATH_1 = "./repo1";
- private static final String REPOSITORYPATH_2 = "./repo2";
+ private static final String REPOSITORYPATH_1 = "." + File.pathSeparator + "repo1";
+ private static final String REPOSITORYPATH_2 = "." + File.pathSeparator + "repo2";
private static final String REPOSITORYPATH_SETTING_1 = "repositorypath="+REPOSITORYPATH_1;
private static final String REPOSITORYPATH_SETTING_2 = "repositorypath="+REPOSITORYPATH_2;
--- a/test/jdk/jdk/jfr/jmx/TestRecordingOptions.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/jdk/jfr/jmx/TestRecordingOptions.java Fri Oct 11 12:08:01 2019 +0530
@@ -25,6 +25,7 @@
package jdk.jfr.jmx;
+import java.io.File;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
@@ -49,7 +50,7 @@
options.put("dumpOnExit", "false");
options.put("disk", "false");
options.put("duration", "1 h"); // don't want recording to stop
-
+ options.put("destination", "." + File.separator + "dump.jfr");
FlightRecorderMXBean bean = JmxHelper.getFlighteRecorderMXBean();
long recId = bean.newRecording();
Map<String, String> defaults = bean.getRecordingOptions(recId);
@@ -72,6 +73,7 @@
Asserts.assertEquals(outOptions.get("dumpOnExit"), "false", "Wrong dumpOnExit");
Asserts.assertEquals(outOptions.get("disk"), "false", "Wrong disk");
Asserts.assertEquals(outOptions.get("duration"), "1 h", "Wrong duration");
+ Asserts.assertEquals(outOptions.get("destination"), "." + File.separator + "dump.jfr", "Wrong destination");
// try empty map
bean.setRecordingOptions(recId, new HashMap<>());
@@ -116,6 +118,7 @@
nullMap.put("dumpOnExit", null);
nullMap.put("disk", null);
nullMap.put("duration", null);
+ nullMap.put("destination", null);
bean.setRecordingOptions(recId, nullMap);
Asserts.assertEquals(bean.getRecordingOptions(recId), defaults);
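The new assertions revolve around the destination recording option. End to end, the MXBean flow the test drives looks roughly like the sketch below; obtaining the bean through ManagementFactory.getPlatformMXBean is just one option (the test goes through JmxHelper), and the dump path is arbitrary:

    import java.io.File;
    import java.lang.management.ManagementFactory;
    import java.util.Map;
    import jdk.management.jfr.FlightRecorderMXBean;

    public class RecordingOptionsSketch {
        public static void main(String[] args) {
            FlightRecorderMXBean bean =
                ManagementFactory.getPlatformMXBean(FlightRecorderMXBean.class);
            long recId = bean.newRecording();
            // "destination" makes JFR dump the recording to this file when it stops.
            bean.setRecordingOptions(recId, Map.of(
                    "destination", "." + File.separator + "dump.jfr",
                    "duration", "1 h"));
            System.out.println(bean.getRecordingOptions(recId).get("destination"));
            bean.closeRecording(recId);
        }
    }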
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/nio/zipfs/CompressionModeTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import org.testng.annotations.*;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.SecureRandom;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+import static java.lang.String.format;
+import static java.util.stream.Collectors.joining;
+import static org.testng.Assert.*;
+
+/**
+ * @test
+ * @bug 8231093
+ * @summary Test Zip FS compressionMethod property
+ * @modules jdk.zipfs
+ * @run testng CompressionModeTest
+ */
+public class CompressionModeTest {
+
+ private static final Path HERE = Path.of(".");
+
+ /**
+ * Number of ZIP entries to create
+ */
+ private static final int ENTRIES = 5;
+
+ /**
+ * Value used for creating the required entries in a ZIP or JAR file
+ */
+ private static final String ZIP_FILE_VALUE = "US Open 2019";
+ private static final byte[] ZIP_FILE_ENTRY =
+ ZIP_FILE_VALUE.getBytes(StandardCharsets.UTF_8);
+
+ private static final SecureRandom random = new SecureRandom();
+
+ /**
+ * Validate that you can create a ZIP file with and without compression
+ * and that entries are created with the specified compression method.
+ *
+ * @param env Properties used for creating the ZIP Filesystem
+ * @param compression Indicates whether the files are DEFLATED(default)
+     * @param compression Indicates whether the files are DEFLATED (the default)
+ * @throws Exception If an error occurs during the creation, verification or
+ * deletion of the ZIP file
+ */
+ @Test(dataProvider = "validCompressionMethods", enabled = true)
+    public void testValidCompressionMethods(Map<String, String> env,
+ int compression) throws Exception {
+
+ System.out.printf("ZIP FS Map = %s, Compression mode= %s%n ",
+ formatMap(env), compression);
+
+ Path zipfile = generatePath(HERE, "test", ".zip");
+ Files.deleteIfExists(zipfile);
+ createZipFile(zipfile, env, ENTRIES);
+ verify(zipfile, compression, ENTRIES, 0);
+ Files.deleteIfExists(zipfile);
+ }
+
+ /**
+ * Validate that an IllegalArgumentException is thrown when an invalid
+ * value is specified for the compressionMethod property.
+ *
+ * @param env Properties used for creating the ZIP Filesystem
+ * @throws Exception if an error occurs other than the expected
+ * IllegalArgumentException
+ */
+ @Test(dataProvider = "invalidCompressionMethod")
+ public void testInvalidCompressionMethod(Map<String, String> env) throws Exception {
+ System.out.printf("ZIP FS Map = %s%n ", formatMap(env));
+ Path zipfile = generatePath(HERE, "test", ".zip");
+ Files.deleteIfExists(zipfile);
+ assertThrows(IllegalArgumentException.class, () ->
+ createZipFile(zipfile, env, ENTRIES));
+ Files.deleteIfExists(zipfile);
+ }
+
+ /**
+ * Create a ZIP File System using the specified properties and a ZIP file
+ * with the specified number of entries
+ *
+ * @param zipFile Path to the ZIP File to create
+ * @param env Properties used for creating the ZIP Filesystem
+ * @param entries Number of entries to add to the ZIP File
+ * @throws IOException If an error occurs while creating the ZIP file
+ */
+ private void createZipFile(Path zipFile, Map<String, String> env,
+ int entries) throws IOException {
+ System.out.printf("Creating file = %s%n", zipFile);
+ try (FileSystem zipfs =
+ FileSystems.newFileSystem(zipFile, env)) {
+
+ for (int i = 0; i < entries; i++) {
+ Files.writeString(zipfs.getPath("Entry-" + i), ZIP_FILE_VALUE);
+ }
+ }
+ }
+
+ /**
+ * DataProvider used to validate that you can create a ZIP file with and
+ * without compression.
+ */
+ @DataProvider(name = "validCompressionMethods")
+ private Object[][] validCompressionMethods() {
+ return new Object[][]{
+ {Map.of("create", "true"), ZipEntry.DEFLATED},
+ {Map.of("create", "true", "noCompression", "true"),
+ ZipEntry.STORED},
+ {Map.of("create", "true", "noCompression", "false"),
+ ZipEntry.DEFLATED},
+ {Map.of("create", "true", "compressionMethod", "STORED"),
+ ZipEntry.STORED},
+ {Map.of("create", "true", "compressionMethod", "DEFLATED"),
+ ZipEntry.DEFLATED},
+ {Map.of("create", "true", "compressionMethod", "stored"),
+ ZipEntry.STORED},
+ {Map.of("create", "true", "compressionMethod", "deflated"),
+ ZipEntry.DEFLATED}
+ };
+ }
+
+ /**
+ * DataProvider used to validate that an IllegalArgumentException is thrown
+ * for an invalid value for the compressionMethod property.
+ */
+ @DataProvider(name = "invalidCompressionMethod")
+ private Object[][] invalidCompressionMethod() {
+ HashMap<String, String> map = new HashMap<>();
+ map.put("create", "true");
+ map.put("compressionMethod", null);
+ return new Object[][]{
+ {map},
+ {Map.of("create", "true", "compressionMethod", "")},
+ {Map.of("create", "true", "compressionMethod",
+ Integer.parseInt("5"))},
+ {Map.of("create", "true", "compressionMethod", "invalid")}
+ };
+ }
+
+ /**
+ * Verify that the given path is a ZIP file containing the
+ * expected entries.
+ *
+ * @param zipfile ZIP file to be validated
+ * @param method Expected Compression method: STORED or DEFLATED
+ * @param entries Number of expected entries
+ * @param start Starting number for verifying entries
+ * @throws Exception If an error occurs while examining the ZIP file
+ */
+ private static void verify(Path zipfile, int method, int entries,
+ int start) throws Exception {
+ // check entries with ZIP API
+ try (ZipFile zf = new ZipFile(zipfile.toFile())) {
+ // check entry count
+ assertEquals(entries, zf.size());
+
+ // check compression method and content of each entry
+ for (int i = start; i < entries; i++) {
+ ZipEntry ze = zf.getEntry("Entry-" + i);
+ assertNotNull(ze);
+ assertEquals(method, ze.getMethod());
+ try (InputStream is = zf.getInputStream(ze)) {
+ byte[] bytes = is.readAllBytes();
+ assertTrue(Arrays.equals(bytes, ZIP_FILE_ENTRY));
+ }
+ }
+ }
+ // check entries with FileSystem API
+ try (FileSystem fs = FileSystems.newFileSystem(zipfile)) {
+
+ // check entry count
+ Path top = fs.getPath("/");
+ long count = Files.find(top, Integer.MAX_VALUE, (path, attrs) ->
+ attrs.isRegularFile() || (attrs.isDirectory() &&
+ path.getFileName() != null &&
+ path.getFileName().toString().equals("META-INF")))
+ .count();
+ assertEquals(entries, count);
+
+ // check content of each entry
+ for (int i = start; i < entries; i++) {
+ Path file = fs.getPath("Entry-" + i);
+ byte[] bytes = Files.readAllBytes(file);
+ assertTrue(Arrays.equals(bytes, ZIP_FILE_ENTRY));
+ }
+ }
+ }
+
+ /**
+ * Generate a temporary file Path
+ *
+ * @param dir Directory used to create the path
+ * @param prefix The prefix string used to create the path
+ * @param suffix The suffix string used to create the path
+ * @return Path that was generated
+ */
+ private static Path generatePath(Path dir, String prefix, String suffix) {
+ long n = random.nextLong();
+ String s = prefix + Long.toUnsignedString(n) + suffix;
+ Path name = dir.getFileSystem().getPath(s);
+ // the generated name should be a simple file name
+ if (name.getParent() != null)
+ throw new IllegalArgumentException("Invalid prefix or suffix");
+ return dir.resolve(name);
+ }
+
+ /**
+ * Utility method to return a formatted String of the key:value entries for
+ * a Map
+ *
+ * @param env Map to format
+ * @return Formatted string of the Map entries
+ */
+ private static String formatMap(Map<String, String> env) {
+ return env.entrySet().stream()
+ .map(e -> format("(%s:%s)", e.getKey(), e.getValue()))
+ .collect(joining(", "));
+ }
+}
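The compressionMethod property exercised above can be tried directly. A minimal sketch (file and entry names arbitrary) that writes one STORED entry through the zip file system and checks it with java.util.zip:

    import java.nio.file.*;
    import java.util.Map;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;

    public class CompressionSketch {
        public static void main(String[] args) throws Exception {
            Path zip = Path.of("stored.zip");
            Files.deleteIfExists(zip);
            // "compressionMethod" = "STORED" writes entries uncompressed.
            try (FileSystem zipfs = FileSystems.newFileSystem(zip,
                    Map.of("create", "true", "compressionMethod", "STORED"))) {
                Files.writeString(zipfs.getPath("entry.txt"), "US Open 2019");
            }
            try (ZipFile zf = new ZipFile(zip.toFile())) {
                System.out.println(zf.getEntry("entry.txt").getMethod() == ZipEntry.STORED); // true
            }
            Files.deleteIfExists(zip);
        }
    }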
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/nio/zipfs/NonExistentPathTests.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+import org.testng.annotations.BeforeTest;
+import org.testng.annotations.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.FileSystemNotFoundException;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.util.Map;
+
+import static org.testng.Assert.assertThrows;
+
+/**
+ * @test
+ * @bug 8223771
+ * @summary Validate that the correct exception is thrown if the ZIP/JAR is not found
+ *
+ * @modules jdk.zipfs
+ * @run testng/othervm NonExistentPathTests
+ */
+public class NonExistentPathTests {
+ private static final String ZIPFS_SCHEME = "jar";
+ private static final ClassLoader CLASS_LOADER = null;
+    // Non-existent JAR file to test against
+ private static final Path INVALID_JAR_FILE = Path.of("jarDoesNotExist.jar");
+ // Standard Exception expected from FileSystems.newFileSystem
+ private static Class<? extends Exception> testException = IOException.class;
+
+ /**
+ * Validate that the correct Exception is thrown when specifying a Path
+ * to a JAR that does not exist and is not being created.
+ */
+ @Test
+ public void testNewFileSystemWithPath() {
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(INVALID_JAR_FILE));
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(INVALID_JAR_FILE, Map.of()));
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(INVALID_JAR_FILE, CLASS_LOADER));
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(INVALID_JAR_FILE, Map.of(), CLASS_LOADER));
+ }
+
+ /**
+ * Validate that the correct Exception is thrown when specifying a URI
+ * to a JAR that does not exist and is not being created.
+ */
+ @Test
+ public void testNewFileSystemWithUri() throws Exception {
+ var jarURI = new URI(ZIPFS_SCHEME,
+ INVALID_JAR_FILE.toUri().toString(), null);
+
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(jarURI, Map.of()));
+
+ assertThrows(testException, () ->
+ FileSystems.newFileSystem(jarURI, Map.of(), CLASS_LOADER));
+ }
+}
--- a/test/jdk/jdk/nio/zipfs/jarfs/MultiReleaseJarTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/jdk/nio/zipfs/jarfs/MultiReleaseJarTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8144355 8144062 8176709 8194070 8193802
+ * @bug 8144355 8144062 8176709 8194070 8193802 8231093
* @summary Test aliasing additions to ZipFileSystem for multi-release jar files
* @library /lib/testlibrary/java/util/jar
* @modules jdk.compiler
@@ -40,6 +40,7 @@
import java.lang.Runtime.Version;
import java.net.URI;
import java.nio.file.*;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
@@ -88,8 +89,7 @@
public Object[][] createStrings() {
return new Object[][]{
{"runtime", MAJOR_VERSION},
- {"-20", 8},
- {"0", 8},
+ {null, 8},
{"8", 8},
{"9", 9},
{Integer.toString(MAJOR_VERSION), MAJOR_VERSION},
@@ -101,8 +101,7 @@
@DataProvider(name="integers")
public Object[][] createIntegers() {
return new Object[][] {
- {Integer.valueOf(-5), 8},
- {Integer.valueOf(0), 8},
+ {null, 8},
{Integer.valueOf(8), 8},
{Integer.valueOf(9), 9},
{Integer.valueOf(MAJOR_VERSION), MAJOR_VERSION},
@@ -114,6 +113,7 @@
@DataProvider(name="versions")
public Object[][] createVersions() {
return new Object[][] {
+ {null, 8},
{Version.parse("8"), 8},
{Version.parse("9"), 9},
{Version.parse(Integer.toString(MAJOR_VERSION)), MAJOR_VERSION},
@@ -122,6 +122,20 @@
};
}
+ @DataProvider(name="invalidVersions")
+ public Object[][] invalidVersions() {
+ return new Object[][] {
+ {Map.of("releaseVersion", "")},
+ {Map.of("releaseVersion", "invalid")},
+ {Map.of("releaseVersion", "0")},
+ {Map.of("releaseVersion", "-1")},
+ {Map.of("releaseVersion", "11.0.1")},
+ {Map.of("releaseVersion", new ArrayList<Long>())},
+ {Map.of("releaseVersion", Integer.valueOf(0))},
+ {Map.of("releaseVersion", Integer.valueOf(-1))}
+ };
+ }
+
// Not the best test but all I can do since ZipFileSystem and JarFileSystem
// are not public, so I can't use (fs instanceof ...)
@Test
@@ -131,7 +145,7 @@
try (FileSystem fs = FileSystems.newFileSystem(mruri, env)) {
Assert.assertTrue(readAndCompare(fs, 8));
}
- env.put("multi-release", "runtime");
+ env.put("releaseVersion", "runtime");
// a configuration and jar file is multi-release
try (FileSystem fs = FileSystems.newFileSystem(mruri, env)) {
Assert.assertTrue(readAndCompare(fs, MAJOR_VERSION));
@@ -150,30 +164,67 @@
@Test(dataProvider="strings")
public void testStrings(String value, int expected) throws Throwable {
+ stringEnv.put("releaseVersion", value);
+ runTest(stringEnv, expected);
+ }
+
+ @Test(dataProvider="integers")
+ public void testIntegers(Integer value, int expected) throws Throwable {
+ integerEnv.put("releaseVersion", value);
+ runTest(integerEnv, expected);
+ }
+
+ @Test(dataProvider="versions")
+ public void testVersions(Version value, int expected) throws Throwable {
+ versionEnv.put("releaseVersion", value);
+ runTest(versionEnv, expected);
+ }
+
+ @Test
+ public void testShortJar() throws Throwable {
+ integerEnv.put("releaseVersion", Integer.valueOf(MAJOR_VERSION));
+ runTest(smruri, integerEnv, MAJOR_VERSION);
+ integerEnv.put("releaseVersion", Integer.valueOf(9));
+ runTest(smruri, integerEnv, 8);
+ }
+
+ /**
+ * Validate that an invalid value for the "releaseVersion" property throws
+ * an {@code IllegalArgumentException}
+ * @param env Zip FS map
+ * @throws Throwable Exception thrown for anything other than the expected
+ * IllegalArgumentException
+ */
+ @Test(dataProvider="invalidVersions")
+ public void testInvalidVersions(Map<String,?> env) throws Throwable {
+ Assert.assertThrows(IllegalArgumentException.class, () ->
+ FileSystems.newFileSystem(Path.of(userdir,
+ "multi-release.jar"), env));
+ }
+
+ // The following tests are for backwards compatibility to validate that
+ // the original property still works
+ @Test(dataProvider="strings")
+ public void testMRStrings(String value, int expected) throws Throwable {
+ stringEnv.clear();
stringEnv.put("multi-release", value);
runTest(stringEnv, expected);
}
@Test(dataProvider="integers")
- public void testIntegers(Integer value, int expected) throws Throwable {
+ public void testMRIntegers(Integer value, int expected) throws Throwable {
+ integerEnv.clear();
integerEnv.put("multi-release", value);
runTest(integerEnv, expected);
}
@Test(dataProvider="versions")
- public void testVersions(Version value, int expected) throws Throwable {
+ public void testMRVersions(Version value, int expected) throws Throwable {
+ versionEnv.clear();
versionEnv.put("multi-release", value);
runTest(versionEnv, expected);
}
- @Test
- public void testShortJar() throws Throwable {
- integerEnv.put("multi-release", Integer.valueOf(MAJOR_VERSION));
- runTest(smruri, integerEnv, MAJOR_VERSION);
- integerEnv.put("multi-release", Integer.valueOf(9));
- runTest(smruri, integerEnv, 8);
- }
-
private void runTest(Map<String,?> env, int expected) throws Throwable {
runTest(mruri, env, expected);
}
@@ -213,7 +264,7 @@
JarBuilder jb = new JarBuilder(jfname);
jb.addAttribute("Multi-Release", "true");
jb.build();
- Map<String,String> env = Map.of("multi-release", "runtime");
+ Map<String,String> env = Map.of("releaseVersion", "runtime");
try (FileSystem fs = FileSystems.newFileSystem(uri, env)) {
Assert.assertTrue(true);
}
@@ -228,7 +279,7 @@
creator.buildCustomMultiReleaseJar(fileName, value, Map.of(),
/*addEntries*/true);
- Map<String,String> env = Map.of("multi-release", "runtime");
+ Map<String,String> env = Map.of("releaseVersion", "runtime");
Path filePath = Paths.get(userdir, fileName);
String ssp = filePath.toUri().toString();
URI customJar = new URI("jar", ssp , null);
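For code consuming the renamed property, opening a multi-release JAR at a chosen release now looks like the sketch below; multi-release.jar is a placeholder path, and the old multi-release key is still accepted, as the compatibility tests above confirm:

    import java.nio.file.FileSystem;
    import java.nio.file.FileSystems;
    import java.nio.file.Path;
    import java.util.Map;

    public class ReleaseVersionSketch {
        public static void main(String[] args) throws Exception {
            Path mrjar = Path.of("multi-release.jar"); // placeholder
            // "runtime" selects the versioned entries matching the running JDK;
            // a number such as "9" pins a specific release instead.
            try (FileSystem fs = FileSystems.newFileSystem(mrjar,
                    Map.of("releaseVersion", "runtime"))) {
                System.out.println(fs.getPath("/").toUri());
            }
        }
    }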
--- a/test/jdk/security/infra/java/security/cert/CertPathValidator/certification/ComodoCA.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/security/infra/java/security/cert/CertPathValidator/certification/ComodoCA.java Fri Oct 11 12:08:01 2019 +0530
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8189131
+ * @bug 8189131 8231887
* @summary Interoperability tests with Comodo RSA, ECC, userTrust RSA, and
* userTrust ECC CAs
* @build ValidatePathWithParams
@@ -112,13 +112,66 @@
// Owner: CN=comodorsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
// STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
+ // OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization, OID.1.3.6.1.4.1.311.60.2.1.3=GB,
+ // SERIALNUMBER=04058690
+ // Issuer: CN=COMODO RSA Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Serial number: a0c7cabcc25ed9358ded02cc1d485545
+ // Valid from: Sun Sep 29 17:00:00 PDT 2019 until: Tue Dec 28 15:59:59 PST 2021
+ private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIH0TCCBrmgAwIBAgIRAKDHyrzCXtk1je0CzB1IVUUwDQYJKoZIhvcNAQELBQAw\n" +
+ "gZIxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO\n" +
+ "BgNVBAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTgwNgYD\n" +
+ "VQQDEy9DT01PRE8gUlNBIEV4dGVuZGVkIFZhbGlkYXRpb24gU2VjdXJlIFNlcnZl\n" +
+ "ciBDQTAeFw0xOTA5MzAwMDAwMDBaFw0yMTEyMjgyMzU5NTlaMIIBPjERMA8GA1UE\n" +
+ "BRMIMDQwNTg2OTAxEzARBgsrBgEEAYI3PAIBAxMCR0IxHTAbBgNVBA8TFFByaXZh\n" +
+ "dGUgT3JnYW5pemF0aW9uMQswCQYDVQQGEwJHQjEPMA0GA1UEERMGTTUgM0VRMRAw\n" +
+ "DgYDVQQHEwdTYWxmb3JkMRYwFAYDVQQJEw1UcmFmZm9yZCBSb2FkMRYwFAYDVQQJ\n" +
+ "Ew1FeGNoYW5nZSBRdWF5MSUwIwYDVQQJExwzcmQgRmxvb3IsIDI2IE9mZmljZSBW\n" +
+ "aWxsYWdlMRgwFgYDVQQKEw9TZWN0aWdvIExpbWl0ZWQxGjAYBgNVBAsTEUNPTU9E\n" +
+ "TyBFViBTR0MgU1NMMTgwNgYDVQQDEy9jb21vZG9yc2FjZXJ0aWZpY2F0aW9uYXV0\n" +
+ "aG9yaXR5LWV2LmNvbW9kb2NhLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC\n" +
+ "AQoCggEBAND/eZQBTjpBDsuteKwl+zpTitF8tJzwHAhcQHC2AaLF/GJl1rnjx4Of\n" +
+ "elMhKhN1Od9KU6onHGOd2w4mD4EiYK9TpXwuwTyzfkCmnkqxZjYK3KAJN013o4L+\n" +
+ "8y1zsGVUulpN/GfMaxTb4XdmeSekTP91Phw3xezijBq3sa++1rO5RBaT1IHeHhHv\n" +
+ "iC9WNrG8CIg/j5MyC9i43LZHiRXLER1LzT/MCIRsiG5AEbiYXV5BNd5SiiHtBJ1q\n" +
+ "0ZJH+AxL2ERaT41VCppboZwThmJGGoky9FWjp6z8U6Enx0fAMJIZNEzW6LAJFKPE\n" +
+ "ynEU004jFFCEumPUqqCC4ogxulphY80CAwEAAaOCA3EwggNtMB8GA1UdIwQYMBaA\n" +
+ "FDna/8ooFIqodBMIueQOqdL6fp1pMB0GA1UdDgQWBBQ+S4ZhIrwOoeGs9BBT4uXq\n" +
+ "89Ux/jAOBgNVHQ8BAf8EBAMCBaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggr\n" +
+ "BgEFBQcDAQYIKwYBBQUHAwIwTwYDVR0gBEgwRjA7BgwrBgEEAbIxAQIBBQEwKzAp\n" +
+ "BggrBgEFBQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwBwYFZ4EM\n" +
+ "AQEwVgYDVR0fBE8wTTBLoEmgR4ZFaHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09N\n" +
+ "T0RPUlNBRXh0ZW5kZWRWYWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3JsMIGHBggr\n" +
+ "BgEFBQcBAQR7MHkwUQYIKwYBBQUHMAKGRWh0dHA6Ly9jcnQuY29tb2RvY2EuY29t\n" +
+ "L0NPTU9ET1JTQUV4dGVuZGVkVmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNydDAk\n" +
+ "BggrBgEFBQcwAYYYaHR0cDovL29jc3AuY29tb2RvY2EuY29tMDoGA1UdEQQzMDGC\n" +
+ "L2NvbW9kb3JzYWNlcnRpZmljYXRpb25hdXRob3JpdHktZXYuY29tb2RvY2EuY29t\n" +
+ "MIIBfQYKKwYBBAHWeQIEAgSCAW0EggFpAWcAdQDuS723dc5guuFCaR+r4Z5mow9+\n" +
+ "X7By2IMAxHuJeqj9ywAAAW2DAXefAAAEAwBGMEQCIDqP1einOiPHnaG1fOZMDrEc\n" +
+ "RAxjq3vEl94fp4pkmke7AiBsJOvPE6irgcOO1/lnP7NRuln7iPJjU7T20PEK5/rm\n" +
+ "KwB2AFWB1MIWkDYBSuoLm1c8U/DA5Dh4cCUIFy+jqh0HE9MMAAABbYMBd0kAAAQD\n" +
+ "AEcwRQIhALgUI5XxM1NHbJDdr19h2pe3LhzK4tpuB/OQ9BgCyrGXAiBdr6mNCB/G\n" +
+ "rbdVx0u7iezwC7mq7iaWugR3rrWlSA8fWQB2ALvZ37wfinG1k5Qjl6qSe0c4V5UK\n" +
+ "q1LoGpCWZDaOHtGFAAABbYMBd1oAAAQDAEcwRQIgXbG32dagMeLhuZb+LSpJO1vI\n" +
+ "BmxmRnNdiz5FbG9cCbwCIQCr1X9f+ebT5fhlDUNBURUorTtM8QQciBiueBqvHk7+\n" +
+ "1DANBgkqhkiG9w0BAQsFAAOCAQEAM/A/1dgoc5NP1n+w3SX9qWcN7QT7ExdrnZSl\n" +
+ "Ygn0PF2fx4gz7cvNKucbpQJNA4C9awGydyYK8/o5KDUXt3K7eb1OAZ/NZBjygsJs\n" +
+ "ikXvxlBh8oEoqBOfOtr24l0NGUWnP8Qeu/VPcIMER4V8qX+in0pCXkSd67nkp6Bs\n" +
+ "EcqhDPgmzdSC1gQHsZuBdotG14OfdH1cG1bRK6GadISLG1h8BFukVem42B149v8F\n" +
+ "MCIUQAYprAVv2WlTZKBx9XzuK6IK3+klHZ07Jfvjvt7PPG5HKSMWBMnMaTHKcyQI\n" +
+ "G3t91yw7BnNNInZlBSsFtqjbHhDcr7uruZdbi0rerSsi2qDr0w==\n" +
+ "-----END CERTIFICATE-----";
+
+ // Owner: CN=comodorsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
+ // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
// ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
// OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
// Issuer: CN=COMODO RSA Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
// ST=Greater Manchester, C=GB
// Serial number: d3df2597cbed1ab6e02ee82021771614
// Valid from: Wed Nov 28 16:00:00 PST 2018 until: Fri Feb 26 15:59:59 PST 2021
- private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
"MIIH7jCCBtagAwIBAgIRANPfJZfL7Rq24C7oICF3FhQwDQYJKoZIhvcNAQELBQAw\n" +
"gZIxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO\n" +
"BgNVBAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTgwNgYD\n" +
@@ -164,60 +217,6 @@
"YrTYerPngjPbZB0bfLOja0vb\n" +
"-----END CERTIFICATE-----";
- // Owner: CN=comodorsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=COMODO CA Limited,
- // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
- // ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
- // OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
- // Issuer: CN=COMODO RSA Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
- // ST=Greater Manchester, C=GB
- // Serial number: 720aa2cfa40094521224f901a984b167
- // Valid from: Thu Jun 29 17:00:00 PDT 2017 until: Sun Sep 29 16:59:59 PDT 2019
- private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
- "MIIH8jCCBtqgAwIBAgIQcgqiz6QAlFISJPkBqYSxZzANBgkqhkiG9w0BAQsFADCB\n" +
- "kjELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G\n" +
- "A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxODA2BgNV\n" +
- "BAMTL0NPTU9ETyBSU0EgRXh0ZW5kZWQgVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVy\n" +
- "IENBMB4XDTE3MDYzMDAwMDAwMFoXDTE5MDkyOTIzNTk1OVowggFdMREwDwYDVQQF\n" +
- "EwgwNDA1ODY5MDETMBEGCysGAQQBgjc8AgEDEwJHQjEdMBsGA1UEDxMUUHJpdmF0\n" +
- "ZSBPcmdhbml6YXRpb24xCzAJBgNVBAYTAkdCMQ8wDQYDVQQREwZNNSAzRVExGzAZ\n" +
- "BgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEWMBQG\n" +
- "A1UECRMNVHJhZmZvcmQgUm9hZDEWMBQGA1UECRMNRXhjaGFuZ2UgUXVheTElMCMG\n" +
- "A1UECRMcM3JkIEZsb29yLCAyNiBPZmZpY2UgVmlsbGFnZTEaMBgGA1UEChMRQ09N\n" +
- "T0RPIENBIExpbWl0ZWQxGjAYBgNVBAsTEUNPTU9ETyBFViBTR0MgU1NMMTgwNgYD\n" +
- "VQQDEy9jb21vZG9yc2FjZXJ0aWZpY2F0aW9uYXV0aG9yaXR5LWV2LmNvbW9kb2Nh\n" +
- "LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAND/eZQBTjpBDsut\n" +
- "eKwl+zpTitF8tJzwHAhcQHC2AaLF/GJl1rnjx4OfelMhKhN1Od9KU6onHGOd2w4m\n" +
- "D4EiYK9TpXwuwTyzfkCmnkqxZjYK3KAJN013o4L+8y1zsGVUulpN/GfMaxTb4Xdm\n" +
- "eSekTP91Phw3xezijBq3sa++1rO5RBaT1IHeHhHviC9WNrG8CIg/j5MyC9i43LZH\n" +
- "iRXLER1LzT/MCIRsiG5AEbiYXV5BNd5SiiHtBJ1q0ZJH+AxL2ERaT41VCppboZwT\n" +
- "hmJGGoky9FWjp6z8U6Enx0fAMJIZNEzW6LAJFKPEynEU004jFFCEumPUqqCC4ogx\n" +
- "ulphY80CAwEAAaOCA3QwggNwMB8GA1UdIwQYMBaAFDna/8ooFIqodBMIueQOqdL6\n" +
- "fp1pMB0GA1UdDgQWBBQ+S4ZhIrwOoeGs9BBT4uXq89Ux/jAOBgNVHQ8BAf8EBAMC\n" +
- "BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw\n" +
- "TwYDVR0gBEgwRjA7BgwrBgEEAbIxAQIBBQEwKzApBggrBgEFBQcCARYdaHR0cHM6\n" +
- "Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwBwYFZ4EMAQEwVgYDVR0fBE8wTTBLoEmg\n" +
- "R4ZFaHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBRXh0ZW5kZWRWYWxp\n" +
- "ZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3JsMIGHBggrBgEFBQcBAQR7MHkwUQYIKwYB\n" +
- "BQUHMAKGRWh0dHA6Ly9jcnQuY29tb2RvY2EuY29tL0NPTU9ET1JTQUV4dGVuZGVk\n" +
- "VmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNydDAkBggrBgEFBQcwAYYYaHR0cDov\n" +
- "L29jc3AuY29tb2RvY2EuY29tMDoGA1UdEQQzMDGCL2NvbW9kb3JzYWNlcnRpZmlj\n" +
- "YXRpb25hdXRob3JpdHktZXYuY29tb2RvY2EuY29tMIIBgAYKKwYBBAHWeQIEAgSC\n" +
- "AXAEggFsAWoAdgCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb37jjd80OyA3cEAAAAVz5\n" +
- "cV7GAAAEAwBHMEUCIQCpgc0Eqw3g4pr+oX88h5xgL1VEAiDpqAhbRtilgYwBbgIg\n" +
- "UaIm+n8AHi55nB//Sb4Nz18GYVcfELfpIzRh1vW9HbYAdwBWFAaaL9fC7NP14b1E\n" +
- "sj7HRna5vJkRXMDvlJhV1onQ3QAAAVz5cVybAAAEAwBIMEYCIQDdsgC4KZ++OP44\n" +
- "X7LbUcNaxe0kFzbctF2L3bnmhp9nXQIhAM0/g+PrZBIBpYlOtzidePi8bBHrLWn2\n" +
- "uBiP3pYIntl4AHcA7ku9t3XOYLrhQmkfq+GeZqMPfl+wctiDAMR7iXqo/csAAAFc\n" +
- "+XFeoQAABAMASDBGAiEAoySTb/QKw7JwtZtPHnECEMzgENQSFy58Kl+Mvcd3SmcC\n" +
- "IQD8cU66Ih3ejvt0OTX+lfxQPKyggQfm4Uk/lwn5LEJXbDANBgkqhkiG9w0BAQsF\n" +
- "AAOCAQEAKEaSYWn3Hi8rfJS4cMTJoMkVp2vpPH2dGXySBEy67TEGRw9+f75w3q95\n" +
- "r1m3P+xsR6dBoidTq/6wqUYI51lB4Fq9ylh1Stp5Gj54CuyT+S31l7lD7sl0KMsn\n" +
- "HDUDQHId7hKeORYpiIZOcrKOglKdi1uiGwDgoiLKh98lUrZA6durrhH+sl69wqp2\n" +
- "0XAu+3hurXzCoZFJfyngTO1kt9qcFUAxc5LofIa9QvC6VR7dI4aAh7dUpIRlnjG3\n" +
- "jJ1mUMTqWO6TFTtddb+uQjDqNgkYYYNuSax1WMEIZWbIi13EjXK1GPQUXJe6gQin\n" +
- "NUq9JH9NPK6m8A1YKT+wgzfTDeaV2Q==\n" +
- "-----END CERTIFICATE-----";
-
public void runTest(ValidatePathWithParams pathValidator) throws Exception {
// Validate valid
pathValidator.validate(new String[]{VALID, INT},
@@ -226,7 +225,7 @@
// Validate Revoked
pathValidator.validate(new String[]{REVOKED, INT},
ValidatePathWithParams.Status.REVOKED,
- "Thu Nov 29 08:41:09 PST 2018", System.out);
+ "Wed Oct 02 06:06:24 PDT 2019", System.out);
}
}
@@ -263,13 +262,58 @@
// Owner: CN=comodoecccertificationauthority-ev.comodoca.com, OU=COMODO EV SSL, O=Sectigo Limited,
// STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
+ // OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization, OID.1.3.6.1.4.1.311.60.2.1.3=GB,
+ // SERIALNUMBER=04058690
+ // Issuer: CN=COMODO ECC Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Serial number: 7972d9d8472a2d52ad1ee6edfb16cbe1
+ // Valid from: Sun Sep 29 17:00:00 PDT 2019 until: Tue Dec 28 15:59:59 PST 2021
+ private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIGPzCCBeWgAwIBAgIQeXLZ2EcqLVKtHubt+xbL4TAKBggqhkjOPQQDAjCBkjEL\n" +
+ "MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE\n" +
+ "BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxODA2BgNVBAMT\n" +
+ "L0NPTU9ETyBFQ0MgRXh0ZW5kZWQgVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENB\n" +
+ "MB4XDTE5MDkzMDAwMDAwMFoXDTIxMTIyODIzNTk1OVowggE6MREwDwYDVQQFEwgw\n" +
+ "NDA1ODY5MDETMBEGCysGAQQBgjc8AgEDEwJHQjEdMBsGA1UEDxMUUHJpdmF0ZSBP\n" +
+ "cmdhbml6YXRpb24xCzAJBgNVBAYTAkdCMQ8wDQYDVQQREwZNNSAzRVExEDAOBgNV\n" +
+ "BAcTB1NhbGZvcmQxFjAUBgNVBAkTDVRyYWZmb3JkIFJvYWQxFjAUBgNVBAkTDUV4\n" +
+ "Y2hhbmdlIFF1YXkxJTAjBgNVBAkTHDNyZCBGbG9vciwgMjYgT2ZmaWNlIFZpbGxh\n" +
+ "Z2UxGDAWBgNVBAoTD1NlY3RpZ28gTGltaXRlZDEWMBQGA1UECxMNQ09NT0RPIEVW\n" +
+ "IFNTTDE4MDYGA1UEAxMvY29tb2RvZWNjY2VydGlmaWNhdGlvbmF1dGhvcml0eS1l\n" +
+ "di5jb21vZG9jYS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS3bqoFLtNG\n" +
+ "7/J9H5GKosDNbYL5SykVmU5FzgSEt81gyAWShkqMSfAnO50fpr65E+o86E+BR3o8\n" +
+ "V9FAU5wuOaGBo4IDcDCCA2wwHwYDVR0jBBgwFoAU007DGbpYWdEcYLdhU0c7p3eP\n" +
+ "+IowHQYDVR0OBBYEFOlnS3MqxwXDpne8IQMXMZHlVKRXMA4GA1UdDwEB/wQEAwIF\n" +
+ "gDAMBgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBP\n" +
+ "BgNVHSAESDBGMDsGDCsGAQQBsjEBAgEFATArMCkGCCsGAQUFBwIBFh1odHRwczov\n" +
+ "L3NlY3VyZS5jb21vZG8uY29tL0NQUzAHBgVngQwBATBWBgNVHR8ETzBNMEugSaBH\n" +
+ "hkVodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9FQ0NFeHRlbmRlZFZhbGlk\n" +
+ "YXRpb25TZWN1cmVTZXJ2ZXJDQS5jcmwwgYcGCCsGAQUFBwEBBHsweTBRBggrBgEF\n" +
+ "BQcwAoZFaHR0cDovL2NydC5jb21vZG9jYS5jb20vQ09NT0RPRUNDRXh0ZW5kZWRW\n" +
+ "YWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8v\n" +
+ "b2NzcC5jb21vZG9jYS5jb20wOgYDVR0RBDMwMYIvY29tb2RvZWNjY2VydGlmaWNh\n" +
+ "dGlvbmF1dGhvcml0eS1ldi5jb21vZG9jYS5jb20wggF8BgorBgEEAdZ5AgQCBIIB\n" +
+ "bASCAWgBZgB1AO5Lvbd1zmC64UJpH6vhnmajD35fsHLYgwDEe4l6qP3LAAABbYME\n" +
+ "EzgAAAQDAEYwRAIgbdo71lBleuJiq+D0ZLp51oVUyWD9EyrtgBSCNwIW4cMCIAqg\n" +
+ "0VFTWHEmAVjaV23fGj3Ybu3mpSiHr6viGlgA2lYaAHUAVYHUwhaQNgFK6gubVzxT\n" +
+ "8MDkOHhwJQgXL6OqHQcT0wwAAAFtgwQTKAAABAMARjBEAiBb/gW1RU7kgFBiNpHx\n" +
+ "LStujKIocyENUTXsMbsac+LktwIgXbEr8vOOCEdBdXQ2F/FKec8ft6gz57mHNmwl\n" +
+ "pp7phbQAdgC72d+8H4pxtZOUI5eqkntHOFeVCqtS6BqQlmQ2jh7RhQAAAW2DBBM6\n" +
+ "AAAEAwBHMEUCIQDjKN3h86ofR94+JxLFoYuoA+DRtxEY8XGg+NQXlZfUrgIgEoO2\n" +
+ "ZzKbGfohdwj/WtDwJDRX5pjXF4M0nECiwtYXDIwwCgYIKoZIzj0EAwIDSAAwRQIg\n" +
+ "AkIRVQBwrElFjrnqk5XPvnlnwkIm1A70ayqOf1FexoQCIQC8tBTn//RCfrhcgTjd\n" +
+ "ER4wRjFfFoc6lC68OHGVg9CZZg==\n" +
+ "-----END CERTIFICATE-----";
+
+ // Owner: CN=comodoecccertificationauthority-ev.comodoca.com, OU=COMODO EV SSL, O=Sectigo Limited,
+ // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
// ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
// OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
// Issuer: CN=COMODO ECC Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
// ST=Greater Manchester, C=GB
// Serial number: 603a5c2f85b63e00ba46ce8c3f6000b0
// Valid from: Wed Nov 28 16:00:00 PST 2018 until: Fri Feb 26 15:59:59 PST 2021
- private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
"MIIGXzCCBgWgAwIBAgIQYDpcL4W2PgC6Rs6MP2AAsDAKBggqhkjOPQQDAjCBkjEL\n" +
"MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE\n" +
"BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxODA2BgNVBAMT\n" +
@@ -307,52 +351,6 @@
"KOC7\n" +
"-----END CERTIFICATE-----";
- // Owner: CN=comodoecccertificationauthority-ev.comodoca.com, OU=COMODO EV SSL, O=COMODO CA Limited,
- // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
- // ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
- // OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
- // Issuer: CN=COMODO ECC Extended Validation Secure Server CA, O=COMODO CA Limited, L=Salford,
- // ST=Greater Manchester, C=GB
- // Serial number: 414e5d66ec7d15ca504213f2811d57af
- // Valid from: Mon Jul 03 17:00:00 PDT 2017 until: Thu Oct 03 16:59:59 PDT 2019
- private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
- "MIIGYDCCBgWgAwIBAgIQQU5dZux9FcpQQhPygR1XrzAKBggqhkjOPQQDAjCBkjEL\n" +
- "MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE\n" +
- "BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxODA2BgNVBAMT\n" +
- "L0NPTU9ETyBFQ0MgRXh0ZW5kZWQgVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENB\n" +
- "MB4XDTE3MDcwNDAwMDAwMFoXDTE5MTAwMzIzNTk1OVowggFZMREwDwYDVQQFEwgw\n" +
- "NDA1ODY5MDETMBEGCysGAQQBgjc8AgEDEwJHQjEdMBsGA1UEDxMUUHJpdmF0ZSBP\n" +
- "cmdhbml6YXRpb24xCzAJBgNVBAYTAkdCMQ8wDQYDVQQREwZNNSAzRVExGzAZBgNV\n" +
- "BAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEWMBQGA1UE\n" +
- "CRMNVHJhZmZvcmQgUm9hZDEWMBQGA1UECRMNRXhjaGFuZ2UgUXVheTElMCMGA1UE\n" +
- "CRMcM3JkIEZsb29yLCAyNiBPZmZpY2UgVmlsbGFnZTEaMBgGA1UEChMRQ09NT0RP\n" +
- "IENBIExpbWl0ZWQxFjAUBgNVBAsTDUNPTU9ETyBFViBTU0wxODA2BgNVBAMTL2Nv\n" +
- "bW9kb2VjY2NlcnRpZmljYXRpb25hdXRob3JpdHktZXYuY29tb2RvY2EuY29tMFkw\n" +
- "EwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEt26qBS7TRu/yfR+RiqLAzW2C+UspFZlO\n" +
- "Rc4EhLfNYMgFkoZKjEnwJzudH6a+uRPqPOhPgUd6PFfRQFOcLjmhgaOCA3EwggNt\n" +
- "MB8GA1UdIwQYMBaAFNNOwxm6WFnRHGC3YVNHO6d3j/iKMB0GA1UdDgQWBBTpZ0tz\n" +
- "KscFw6Z3vCEDFzGR5VSkVzAOBgNVHQ8BAf8EBAMCBYAwDAYDVR0TAQH/BAIwADAd\n" +
- "BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwTwYDVR0gBEgwRjA7BgwrBgEE\n" +
- "AbIxAQIBBQEwKzApBggrBgEFBQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNv\n" +
- "bS9DUFMwBwYFZ4EMAQEwVgYDVR0fBE8wTTBLoEmgR4ZFaHR0cDovL2NybC5jb21v\n" +
- "ZG9jYS5jb20vQ09NT0RPRUNDRXh0ZW5kZWRWYWxpZGF0aW9uU2VjdXJlU2VydmVy\n" +
- "Q0EuY3JsMIGHBggrBgEFBQcBAQR7MHkwUQYIKwYBBQUHMAKGRWh0dHA6Ly9jcnQu\n" +
- "Y29tb2RvY2EuY29tL0NPTU9ET0VDQ0V4dGVuZGVkVmFsaWRhdGlvblNlY3VyZVNl\n" +
- "cnZlckNBLmNydDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuY29tb2RvY2EuY29t\n" +
- "MDoGA1UdEQQzMDGCL2NvbW9kb2VjY2NlcnRpZmljYXRpb25hdXRob3JpdHktZXYu\n" +
- "Y29tb2RvY2EuY29tMIIBfQYKKwYBBAHWeQIEAgSCAW0EggFpAWcAdgCkuQmQtBhY\n" +
- "FIe7E6LMZ3AKPDWYBPkb37jjd80OyA3cEAAAAV0NLqsqAAAEAwBHMEUCIAz9Jjq3\n" +
- "qLUd/a2PYZnLGsEG/MrL7vab5rmGBg8RGAJxAiEA7JJnar07NIjCLLO77xJ3UFcu\n" +
- "UMM3M8JgGC8wbuRwxbUAdgBWFAaaL9fC7NP14b1Esj7HRna5vJkRXMDvlJhV1onQ\n" +
- "3QAAAV0NLqjmAAAEAwBHMEUCIHRvPWKr7vPMBWx1gLPkt8inPINWPNSoax178e5A\n" +
- "D0cPAiEAvRL/VP4DLiyHvcU9AOqTzQXGuWCzswWKG59hSm7gS4kAdQDuS723dc5g\n" +
- "uuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAV0NLqsDAAAEAwBGMEQCIFALT043\n" +
- "X5IffLsxIAGXTrWgkZHf12QKgrYKXVB629eOAiAIeci2xi3fUW6mU8tT4LwyjowV\n" +
- "DkrSCw1ZMo0JApsfzTAKBggqhkjOPQQDAgNJADBGAiEA7HUxjwx0MBC+4PuPx4Z1\n" +
- "WpKz7jdHOMTh1sdaoVV5hNoCIQDrnjBFUopXHTvm/rj+aMFIeYejggPqv14KJOqT\n" +
- "gym+uA==\n" +
- "-----END CERTIFICATE-----";
-
public void runTest(ValidatePathWithParams pathValidator) throws Exception {
// Validate valid
pathValidator.validate(new String[]{VALID, INT},
@@ -361,19 +359,61 @@
// Validate Revoked
pathValidator.validate(new String[]{REVOKED, INT},
ValidatePathWithParams.Status.REVOKED,
- "Thu Nov 29 08:12:02 PST 2018", System.out);
+ "Wed Oct 02 06:05:57 PDT 2019", System.out);
}
}
class ComodoUserTrustRSA {
+ // Owner: CN=Sectigo RSA Extended Validation Secure Server CA, O=Sectigo Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Issuer: CN=USERTrust RSA Certification Authority, O=The USERTRUST Network, L=Jersey City, ST=New Jersey, C=US
+ // Serial number: 284e39c14b386d889c7299e58cd05a57
+ // Valid from: Thu Nov 01 17:00:00 PDT 2018 until: Tue Dec 31 15:59:59 PST 2030
+ private static final String INT_VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIGNDCCBBygAwIBAgIQKE45wUs4bYiccpnljNBaVzANBgkqhkiG9w0BAQwFADCB\n" +
+ "iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl\n" +
+ "cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV\n" +
+ "BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgx\n" +
+ "MTAyMDAwMDAwWhcNMzAxMjMxMjM1OTU5WjCBkTELMAkGA1UEBhMCR0IxGzAZBgNV\n" +
+ "BAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEYMBYGA1UE\n" +
+ "ChMPU2VjdGlnbyBMaW1pdGVkMTkwNwYDVQQDEzBTZWN0aWdvIFJTQSBFeHRlbmRl\n" +
+ "ZCBWYWxpZGF0aW9uIFNlY3VyZSBTZXJ2ZXIgQ0EwggEiMA0GCSqGSIb3DQEBAQUA\n" +
+ "A4IBDwAwggEKAoIBAQCaoslYBiqFev0Yc4TXPa0s9oliMcn9VaENfTUK4GVT7niB\n" +
+ "QXxC6Mt8kTtvyr5lU92hDQDh2WDPQsZ7oibh75t2kowT3z1S+Sy1GsUDM4NbdOde\n" +
+ "orcmzFm/b4bwD4G/G+pB4EX1HSfjN9eT0Hje+AGvCrd2MmnxJ+Yymv9BH9OB65jK\n" +
+ "rUO9Na4iHr48XWBDFvzsPCJ11Uioof6dRBVp+Lauj88Z7k2X8d606HeXn43h6acp\n" +
+ "LLURWyqXM0CrzedVWBzuXKuBEaqD6w/1VpLJvSU+wl3ScvXSLFp82DSRJVJONXWl\n" +
+ "dp9gjJioPGRByeZw11k3galbbF5gFK9xSnbDx29LAgMBAAGjggGNMIIBiTAfBgNV\n" +
+ "HSMEGDAWgBRTeb9aqitKz1SA4dibwJ3ysgNmyzAdBgNVHQ4EFgQULGn/gMmHkK40\n" +
+ "4bTnTJOFmUDpp7IwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\n" +
+ "HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDoGA1UdIAQzMDEwLwYEVR0g\n" +
+ "ADAnMCUGCCsGAQUFBwIBFhlodHRwczovL2Nwcy51c2VydHJ1c3QuY29tMFAGA1Ud\n" +
+ "HwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RS\n" +
+ "U0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgwPwYI\n" +
+ "KwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FB\n" +
+ "ZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRydXN0\n" +
+ "LmNvbTANBgkqhkiG9w0BAQwFAAOCAgEAQ4AzPxVypLyy3IjUUmVl7FaxrHsXQq2z\n" +
+ "Zt2gKnHQShuA+5xpRPNndjvhHk4D08PZXUe6Im7E5knqxtyl5aYdldb+HI/7f+zd\n" +
+ "W/1ub2N4Vq4ZYUjcZ1ECOFK7Z2zoNicDmU+Fe/TreXPuPsDicTG/tMcWEVM558OQ\n" +
+ "TJkB2LK3ZhGukWM/RTMRcRdXaXOX8Lh0ylzRO1O0ObXytvOFpkkkD92HGsfS06i7\n" +
+ "NLDPJEeZXqzHE5Tqj7VSAj+2luwfaXaPLD8lQEVci8xmsPGOn0mXE1ZzsChEPhVq\n" +
+ "FYQUsbiRJRhidKauhd+G2CkRTcR5fpsuz+iStB9s5Fks9lKoXnn0hv78VYjvR78C\n" +
+ "Cvj5FW/ounHjWTWMb3il9S5ngbFGcelB1l/MQkR63+1ybdi2OpjNWJCftxOWUpkC\n" +
+ "xaRdnOnSj7GQY0NLn8Gtq9FcSZydtkVgXpouSFZkXNS/MYwbcCCcRKBbrk8ss0SI\n" +
+ "Xg1gTURjh9VP1OHm0OktYcUw9e90wHIDn7h0qA+bWOsZquSRzT4s2crF3ZSA3tuV\n" +
+ "/UJ33mjdVO8wBD8aI5y10QreSPJvZHHNDyCmoyjXvNhR+u3arXUoHWxO+MZBeXbi\n" +
+ "iF7Nwn/IEmQvWBW8l6D26CXIavcY1kAJcfyzHkrPbLo+fAOa/KFl3lIU+0biEVNk\n" +
+ "Q9zXE6hC6X4=\n" +
+ "-----END CERTIFICATE-----";
+
// Owner: CN=USERTrust RSA Extended Validation Secure Server CA,
// O=The USERTRUST Network, L=Jersey City, ST=New Jersey, C=US
// Issuer: CN=USERTrust RSA Certification Authority, O=The USERTRUST Network,
// L=Jersey City, ST=New Jersey, C=US
// Serial number: f6bb751efa7d2e8368e606407334f83
// Valid from: Sat Feb 11 16:00:00 PST 2012 until: Thu Feb 11 15:59:59 PST 2027
- private static final String INT = "-----BEGIN CERTIFICATE-----\n"
+ private static final String INT_REVOKED = "-----BEGIN CERTIFICATE-----\n"
+ "MIIGGTCCBAGgAwIBAgIQD2u3Ue+n0ug2jmBkBzNPgzANBgkqhkiG9w0BAQwFADCB\n"
+ "iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl\n"
+ "cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV\n"
@@ -409,15 +449,69 @@
+ "4fokbdNREXoShKClNIPbB5iY+WdSzb9CKLyb96g=\n"
+ "-----END CERTIFICATE-----";
- // Owner: CN=usertrustrsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL,
- // O=Sectigo Limited, STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road,
- // L=Salford, ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
+ // Owner: CN=usertrustrsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
+ // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford, ST=Manchester,
+ // OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization, OID.1.3.6.1.4.1.311.60.2.1.3=GB,
+ // SERIALNUMBER=04058690
+ // Issuer: CN=Sectigo RSA Extended Validation Secure Server CA, O=Sectigo Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Serial number: b07fd164b5790c9d5d1fddff5819cdb2
+ // Valid from: Sun Sep 29 17:00:00 PDT 2019 until: Tue Dec 28 15:59:59 PST 2021
+ private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIH5TCCBs2gAwIBAgIRALB/0WS1eQydXR/d/1gZzbIwDQYJKoZIhvcNAQELBQAw\n" +
+ "gZExCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO\n" +
+ "BgNVBAcTB1NhbGZvcmQxGDAWBgNVBAoTD1NlY3RpZ28gTGltaXRlZDE5MDcGA1UE\n" +
+ "AxMwU2VjdGlnbyBSU0EgRXh0ZW5kZWQgVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVy\n" +
+ "IENBMB4XDTE5MDkzMDAwMDAwMFoXDTIxMTIyODIzNTk1OVowggFWMREwDwYDVQQF\n" +
+ "EwgwNDA1ODY5MDETMBEGCysGAQQBgjc8AgEDEwJHQjEdMBsGA1UEDxMUUHJpdmF0\n" +
+ "ZSBPcmdhbml6YXRpb24xCzAJBgNVBAYTAkdCMQ8wDQYDVQQREwZNNSAzRVExEzAR\n" +
+ "BgNVBAgTCk1hbmNoZXN0ZXIxEDAOBgNVBAcTB1NhbGZvcmQxFjAUBgNVBAkTDVRy\n" +
+ "YWZmb3JkIFJvYWQxFjAUBgNVBAkTDUV4Y2hhbmdlIFF1YXkxJTAjBgNVBAkTHDNy\n" +
+ "ZCBGbG9vciwgMjYgT2ZmaWNlIFZpbGxhZ2UxGDAWBgNVBAoTD1NlY3RpZ28gTGlt\n" +
+ "aXRlZDEaMBgGA1UECxMRQ09NT0RPIEVWIFNHQyBTU0wxOzA5BgNVBAMTMnVzZXJ0\n" +
+ "cnVzdHJzYWNlcnRpZmljYXRpb25hdXRob3JpdHktZXYuY29tb2RvY2EuY29tMIIB\n" +
+ "IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnh/rxeiYwpLa651eLvGnR+RE\n" +
+ "rhDWkTZtqZcHw9Oy7JL2uELyEPbM+v0az40cBHS0bQZJZbWmXNukMUMSwIb4z7t8\n" +
+ "OXlxz9uvxEufvlqBl4qeC/z3LpFBRRHEero3yGKVwkoe1aP2Pq7Udi+7i7eVZZdA\n" +
+ "1ticxZWo/UBU9mwbIOYqf/4xzZ6G891hKb+NAuuEfxG52vXZl8odMThfHuDlkfS7\n" +
+ "nZMQBaO40KJeSEBhr+5TIS7d7tWWye/F6oEQ0+dHBiF9PyZ1dXoO8aue/80mP+0F\n" +
+ "MYTmRFsKHge6ZjojfH9cLlR5kTqtP5Tqh5GBQ4zp3uyIBBU6ylKp9PNHkewGUQID\n" +
+ "AQABo4IDbjCCA2owHwYDVR0jBBgwFoAULGn/gMmHkK404bTnTJOFmUDpp7IwHQYD\n" +
+ "VR0OBBYEFHz7cvDn1LYe2M+z4plwQn7rt938MA4GA1UdDwEB/wQEAwIFoDAMBgNV\n" +
+ "HRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBJBgNVHSAE\n" +
+ "QjBAMDUGDCsGAQQBsjEBAgEFATAlMCMGCCsGAQUFBwIBFhdodHRwczovL3NlY3Rp\n" +
+ "Z28uY29tL0NQUzAHBgVngQwBATBWBgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3Js\n" +
+ "LnNlY3RpZ28uY29tL1NlY3RpZ29SU0FFeHRlbmRlZFZhbGlkYXRpb25TZWN1cmVT\n" +
+ "ZXJ2ZXJDQS5jcmwwgYYGCCsGAQUFBwEBBHoweDBRBggrBgEFBQcwAoZFaHR0cDov\n" +
+ "L2NydC5zZWN0aWdvLmNvbS9TZWN0aWdvUlNBRXh0ZW5kZWRWYWxpZGF0aW9uU2Vj\n" +
+ "dXJlU2VydmVyQ0EuY3J0MCMGCCsGAQUFBzABhhdodHRwOi8vb2NzcC5zZWN0aWdv\n" +
+ "LmNvbTA9BgNVHREENjA0gjJ1c2VydHJ1c3Ryc2FjZXJ0aWZpY2F0aW9uYXV0aG9y\n" +
+ "aXR5LWV2LmNvbW9kb2NhLmNvbTCCAX4GCisGAQQB1nkCBAIEggFuBIIBagFoAHYA\n" +
+ "7ku9t3XOYLrhQmkfq+GeZqMPfl+wctiDAMR7iXqo/csAAAFtgzv54wAABAMARzBF\n" +
+ "AiB5PmhsK3zU3XdKvyxw/wWHMmLI7apHLa1yKdjkA8H+ggIhALdUx7Tl8aeWhK6z\n" +
+ "lh+PHvMAdCcAJK6w9qBJGQtSrYO5AHUAVYHUwhaQNgFK6gubVzxT8MDkOHhwJQgX\n" +
+ "L6OqHQcT0wwAAAFtgzv5zgAABAMARjBEAiBumSwAUamibqJXTN2cf/H3mjd0T35/\n" +
+ "UK9w2hu9gFobxgIgSXTLndHyqFUmcmquu3It0WC1yl6YMceGixbQL1e8BQcAdwC7\n" +
+ "2d+8H4pxtZOUI5eqkntHOFeVCqtS6BqQlmQ2jh7RhQAAAW2DO/nXAAAEAwBIMEYC\n" +
+ "IQDHRs10oYoXE5yq6WsiksjdQsUWZNpbSsrmz0u+KlxTVQIhAJ4rvHItKSeJLkaN\n" +
+ "S3YpVZnkN8tOwuxPsYeyVx/BtaNpMA0GCSqGSIb3DQEBCwUAA4IBAQAPFIsUFymo\n" +
+ "VTp0vntHrZpBApBQzDeriQv7Bi7tmou/Ng47RtXW3DjGdrePGSfOdl7h62k8qprU\n" +
+ "JeLyloDqhvmT/CG/hdwrfZ3Sv3N2xpetGcnW5S3oEi3m+/M1ls9eD+x1vybqV9Kd\n" +
+ "lcjuV7SYDlbvAS9w7TcygudhdW0cI8XTCvesGKohBkAlqaQ/MWYpt4WvsxHjbWgn\n" +
+ "5ZlIYR6A1ZFEjADifViH/5AA79lgGhAskkIWPjvRFalEVKTKtjhRK76eCfZs4Frr\n" +
+ "CEOpon+BeNKk+x/K/r10dSoWe0SV2uGVxTD83zkP++eREwo1hTgn8bXn7ftlnA3j\n" +
+ "7ml+Usz6udaD\n" +
+ "-----END CERTIFICATE-----";
+
+ // Owner: CN=usertrustrsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
+ // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
+ // ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
// OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
// Issuer: CN=USERTrust RSA Extended Validation Secure Server CA, O=The USERTRUST Network, L=Jersey City,
// ST=New Jersey, C=US
// Serial number: d3c204e8df6a1539568cf15e97e57b1d
// Valid from: Wed Nov 28 16:00:00 PST 2018 until: Fri Feb 26 15:59:59 PST 2021
- private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
"MIIIADCCBuigAwIBAgIRANPCBOjfahU5VozxXpflex0wDQYJKoZIhvcNAQELBQAw\n" +
"gZUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK\n" +
"ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMTswOQYD\n" +
@@ -463,81 +557,56 @@
"3Ld31zbQaywKdpCsT74/hEBMfcDiP02mmtyrlqHD4R3tdYne\n" +
"-----END CERTIFICATE-----";
- // Owner: CN=usertrustrsacertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=COMODO CA Limited,
- // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
- // ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
- // OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
- // Issuer: CN=USERTrust RSA Extended Validation Secure Server CA, O=The USERTRUST Network, L=Jersey City,
- // ST=New Jersey, C=US
- // Serial number: ffcada019c9fb1155a32300083cb99c9
- // Valid from: Mon Jul 03 17:00:00 PDT 2017 until: Thu Oct 03 16:59:59 PDT 2019
- private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
- "MIIIATCCBumgAwIBAgIRAP/K2gGcn7EVWjIwAIPLmckwDQYJKoZIhvcNAQELBQAw\n" +
- "gZUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK\n" +
- "ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMTswOQYD\n" +
- "VQQDEzJVU0VSVHJ1c3QgUlNBIEV4dGVuZGVkIFZhbGlkYXRpb24gU2VjdXJlIFNl\n" +
- "cnZlciBDQTAeFw0xNzA3MDQwMDAwMDBaFw0xOTEwMDMyMzU5NTlaMIIBYDERMA8G\n" +
- "A1UEBRMIMDQwNTg2OTAxEzARBgsrBgEEAYI3PAIBAxMCR0IxHTAbBgNVBA8TFFBy\n" +
- "aXZhdGUgT3JnYW5pemF0aW9uMQswCQYDVQQGEwJHQjEPMA0GA1UEERMGTTUgM0VR\n" +
- "MRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcTB1NhbGZvcmQx\n" +
- "FjAUBgNVBAkTDVRyYWZmb3JkIFJvYWQxFjAUBgNVBAkTDUV4Y2hhbmdlIFF1YXkx\n" +
- "JTAjBgNVBAkTHDNyZCBGbG9vciwgMjYgT2ZmaWNlIFZpbGxhZ2UxGjAYBgNVBAoT\n" +
- "EUNPTU9ETyBDQSBMaW1pdGVkMRowGAYDVQQLExFDT01PRE8gRVYgU0dDIFNTTDE7\n" +
- "MDkGA1UEAxMydXNlcnRydXN0cnNhY2VydGlmaWNhdGlvbmF1dGhvcml0eS1ldi5j\n" +
- "b21vZG9jYS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCeH+vF\n" +
- "6JjCktrrnV4u8adH5ESuENaRNm2plwfD07Lskva4QvIQ9sz6/RrPjRwEdLRtBkll\n" +
- "taZc26QxQxLAhvjPu3w5eXHP26/ES5++WoGXip4L/PcukUFFEcR6ujfIYpXCSh7V\n" +
- "o/Y+rtR2L7uLt5Vll0DW2JzFlaj9QFT2bBsg5ip//jHNnobz3WEpv40C64R/Ebna\n" +
- "9dmXyh0xOF8e4OWR9LudkxAFo7jQol5IQGGv7lMhLt3u1ZbJ78XqgRDT50cGIX0/\n" +
- "JnV1eg7xq57/zSY/7QUxhOZEWwoeB7pmOiN8f1wuVHmROq0/lOqHkYFDjOne7IgE\n" +
- "FTrKUqn080eR7AZRAgMBAAGjggN8MIIDeDAfBgNVHSMEGDAWgBQvgU/iZvq8aL+Z\n" +
- "Q4RSiSA6gvOkpTAdBgNVHQ4EFgQUfPty8OfUth7Yz7PimXBCfuu33fwwDgYDVR0P\n" +
- "AQH/BAQDAgWgMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG\n" +
- "AQUFBwMCMEsGA1UdIAREMEIwNwYMKwYBBAGyMQECAQUBMCcwJQYIKwYBBQUHAgEW\n" +
- "GWh0dHBzOi8vY3BzLnVzZXJ0cnVzdC5jb20wBwYFZ4EMAQEwWgYDVR0fBFMwUTBP\n" +
- "oE2gS4ZJaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdFJTQUV4dGVu\n" +
- "ZGVkVmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNybDCBjQYIKwYBBQUHAQEEgYAw\n" +
- "fjBVBggrBgEFBQcwAoZJaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJUcnVz\n" +
- "dFJTQUV4dGVuZGVkVmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNydDAlBggrBgEF\n" +
- "BQcwAYYZaHR0cDovL29jc3AudXNlcnRydXN0LmNvbTA9BgNVHREENjA0gjJ1c2Vy\n" +
- "dHJ1c3Ryc2FjZXJ0aWZpY2F0aW9uYXV0aG9yaXR5LWV2LmNvbW9kb2NhLmNvbTCC\n" +
- "AX8GCisGAQQB1nkCBAIEggFvBIIBawFpAHYApLkJkLQYWBSHuxOizGdwCjw1mAT5\n" +
- "G9+443fNDsgN3BAAAAFdDU2iYQAABAMARzBFAiB0o4GnVHD8MeVQ32D0XYu+EQQW\n" +
- "jvN78rmCfk0OEBxyFAIhAKgyctIn0IaDJiZzsrtAiqEnkcMtuh8o+R0Rqw1ygAjk\n" +
- "AHcAVhQGmi/XwuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFdDU2gFgAABAMA\n" +
- "SDBGAiEA7mcmZ8H5uHuNCdI0CVxsqDZQcZX/gVk94KckePkzQoACIQCHwm5hcvNC\n" +
- "M8vNmFkboQN79DglRctHrlh143A6mUTk8QB2AO5Lvbd1zmC64UJpH6vhnmajD35f\n" +
- "sHLYgwDEe4l6qP3LAAABXQ1NojoAAAQDAEcwRQIhAPqwijgE0Fr6uJ+yF+TvyXco\n" +
- "Hduv9h7R5WWwJfghXiMyAiBB4+fJm4rIcOnJBZmOqFnRpIjPN0jwDqJT0nDHxaXA\n" +
- "nDANBgkqhkiG9w0BAQsFAAOCAQEACXitF1bTEvV1HX11WrT/XuoMhsoPK4TS16rs\n" +
- "FqztV4iXKlA1/h5qbsjYY1gVrM+/6kQkmEs5qrxsek2WNxY80NO3WAzroRJ3H9Sd\n" +
- "mPn0No2P8LZ5Fs5hvaD/PfWO5xxey80c3kGyvWOej90P3IrL/1RiULyh95TrXBjI\n" +
- "ddCBsZ28904wsQUrPBPMpiu0DKl1HR/em9WkcipMi+onJxxFWjucssz5PW/BzGYF\n" +
- "jfWLDEI0tN5L4CWV3iVXFXOURY1Mwhtsey9jvlEyxSsys55QdKF40yGgtV9VC+os\n" +
- "7hJP33+qA0cvCTaRytiPP6z/l2G/KSIXTyv6SxzGhsTFfzLAOg==\n" +
- "-----END CERTIFICATE-----";
-
public void runTest(ValidatePathWithParams pathValidator) throws Exception {
// Validate valid
- pathValidator.validate(new String[]{VALID, INT},
+ pathValidator.validate(new String[]{VALID, INT_VALID},
ValidatePathWithParams.Status.GOOD, null, System.out);
// Validate Revoked
- pathValidator.validate(new String[]{REVOKED, INT},
+ pathValidator.validate(new String[]{REVOKED, INT_REVOKED},
ValidatePathWithParams.Status.REVOKED,
- "Thu Nov 29 10:58:13 PST 2018", System.out);
+ "Wed Oct 02 06:07:12 PDT 2019", System.out);
}
}
class ComodoUserTrustECC {
+ // Owner: CN=Sectigo ECC Extended Validation Secure Server CA, O=Sectigo Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Issuer: CN=USERTrust ECC Certification Authority, O=The USERTRUST Network, L=Jersey City, ST=New Jersey, C=US
+ // Serial number: 80f5606d3a162b143adc12fbe8c2066f
+ // Valid from: Thu Nov 01 17:00:00 PDT 2018 until: Tue Dec 31 15:59:59 PST 2030
+ private static final String INT_VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIDyTCCA0+gAwIBAgIRAID1YG06FisUOtwS++jCBm8wCgYIKoZIzj0EAwMwgYgx\n" +
+ "CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz\n" +
+ "ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD\n" +
+ "EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTE4MTEw\n" +
+ "MjAwMDAwMFoXDTMwMTIzMTIzNTk1OVowgZExCzAJBgNVBAYTAkdCMRswGQYDVQQI\n" +
+ "ExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcTB1NhbGZvcmQxGDAWBgNVBAoT\n" +
+ "D1NlY3RpZ28gTGltaXRlZDE5MDcGA1UEAxMwU2VjdGlnbyBFQ0MgRXh0ZW5kZWQg\n" +
+ "VmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENBMFkwEwYHKoZIzj0CAQYIKoZIzj0D\n" +
+ "AQcDQgAEAyJ5Ca9JyXq8bO+krLVWysbtm7fdMSJ54uFD23t0x6JAC4IjxevfQJzW\n" +
+ "z4T6yY+FybTBqtOa++ijJFnkB5wKy6OCAY0wggGJMB8GA1UdIwQYMBaAFDrhCYbU\n" +
+ "zxnClnZ0SXbc4DXGY2OaMB0GA1UdDgQWBBTvwSqVDDLa+3Mw3IoT2BVL9xPo+DAO\n" +
+ "BgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggr\n" +
+ "BgEFBQcDAQYIKwYBBQUHAwIwOgYDVR0gBDMwMTAvBgRVHSAAMCcwJQYIKwYBBQUH\n" +
+ "AgEWGWh0dHBzOi8vY3BzLnVzZXJ0cnVzdC5jb20wUAYDVR0fBEkwRzBFoEOgQYY/\n" +
+ "aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdEVDQ0NlcnRpZmljYXRp\n" +
+ "b25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEBBGowaDA/BggrBgEFBQcwAoYzaHR0\n" +
+ "cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdEVDQ0FkZFRydXN0Q0EuY3J0\n" +
+ "MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1c3QuY29tMAoGCCqGSM49\n" +
+ "BAMDA2gAMGUCMQCjHztBDL90GCRXHlGqm0H7kzP04hd0MxwakKjWzOmstXNFLONj\n" +
+ "RFa0JqI/iKUJMFcCMCbLgyzcFW7DihtY5XE0XCLCw+git0NjxiFB6FaOFIlyDdqT\n" +
+ "j+Th+DJ92JLvICVD/g==\n" +
+ "-----END CERTIFICATE-----";
+
// Owner: CN=USERTrust ECC Extended Validation Secure Server CA, O=The USERTRUST Network,
// L=Jersey City, ST=New Jersey, C=US
// Issuer: CN=USERTrust ECC Certification Authority, O=The USERTRUST Network,
// L=Jersey City, ST=New Jersey, C=US
// Serial number: 3d09b24f5c08a7ce8eb85a51d3c1aa52
// Valid from: Sun Apr 14 17:00:00 PDT 2013 until: Fri Apr 14 16:59:59 PDT 2028
- private static final String INT = "-----BEGIN CERTIFICATE-----\n"
+ private static final String INT_REVOKED = "-----BEGIN CERTIFICATE-----\n"
+ "MIIDwTCCA0igAwIBAgIQPQmyT1wIp86OuFpR08GqUjAKBggqhkjOPQQDAzCBiDEL\n"
+ "MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl\n"
+ "eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT\n"
@@ -563,13 +632,58 @@
// Owner: CN=usertrustecccertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
// STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
+ // OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization, OID.1.3.6.1.4.1.311.60.2.1.3=GB,
+ // SERIALNUMBER=04058690
+ // Issuer: CN=Sectigo ECC Extended Validation Secure Server CA, O=Sectigo Limited, L=Salford,
+ // ST=Greater Manchester, C=GB
+ // Serial number: 8b72489b7f505a55e2a22659c90ed2ab
+ // Valid from: Sun Sep 29 17:00:00 PDT 2019 until: Tue Dec 28 15:59:59 PST 2021
+ private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ "MIIGRTCCBeugAwIBAgIRAItySJt/UFpV4qImWckO0qswCgYIKoZIzj0EAwIwgZEx\n" +
+ "CzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNV\n" +
+ "BAcTB1NhbGZvcmQxGDAWBgNVBAoTD1NlY3RpZ28gTGltaXRlZDE5MDcGA1UEAxMw\n" +
+ "U2VjdGlnbyBFQ0MgRXh0ZW5kZWQgVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENB\n" +
+ "MB4XDTE5MDkzMDAwMDAwMFoXDTIxMTIyODIzNTk1OVowggFBMREwDwYDVQQFEwgw\n" +
+ "NDA1ODY5MDETMBEGCysGAQQBgjc8AgEDEwJHQjEdMBsGA1UEDxMUUHJpdmF0ZSBP\n" +
+ "cmdhbml6YXRpb24xCzAJBgNVBAYTAkdCMQ8wDQYDVQQREwZNNSAzRVExEDAOBgNV\n" +
+ "BAcTB1NhbGZvcmQxFjAUBgNVBAkTDVRyYWZmb3JkIFJvYWQxFjAUBgNVBAkTDUV4\n" +
+ "Y2hhbmdlIFF1YXkxJTAjBgNVBAkTHDNyZCBGbG9vciwgMjYgT2ZmaWNlIFZpbGxh\n" +
+ "Z2UxGDAWBgNVBAoTD1NlY3RpZ28gTGltaXRlZDEaMBgGA1UECxMRQ09NT0RPIEVW\n" +
+ "IFNHQyBTU0wxOzA5BgNVBAMTMnVzZXJ0cnVzdGVjY2NlcnRpZmljYXRpb25hdXRo\n" +
+ "b3JpdHktZXYuY29tb2RvY2EuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE\n" +
+ "LTJfEd92Wlg+h/AVtPsMmwX9Puvi+WGCv3sgFRpur8Iy2kGVpXHRQTCn2j9aky4t\n" +
+ "FQGm7OG2klJA/MEeevKVaaOCA28wggNrMB8GA1UdIwQYMBaAFO/BKpUMMtr7czDc\n" +
+ "ihPYFUv3E+j4MB0GA1UdDgQWBBSzrWHzmiHwx2Rrm7SjRC0UegNrKzAOBgNVHQ8B\n" +
+ "Af8EBAMCB4AwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\n" +
+ "BQUHAwIwSQYDVR0gBEIwQDA1BgwrBgEEAbIxAQIBBQEwJTAjBggrBgEFBQcCARYX\n" +
+ "aHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwBwYFZ4EMAQEwVgYDVR0fBE8wTTBLoEmg\n" +
+ "R4ZFaHR0cDovL2NybC5zZWN0aWdvLmNvbS9TZWN0aWdvRUNDRXh0ZW5kZWRWYWxp\n" +
+ "ZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3JsMIGGBggrBgEFBQcBAQR6MHgwUQYIKwYB\n" +
+ "BQUHMAKGRWh0dHA6Ly9jcnQuc2VjdGlnby5jb20vU2VjdGlnb0VDQ0V4dGVuZGVk\n" +
+ "VmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNydDAjBggrBgEFBQcwAYYXaHR0cDov\n" +
+ "L29jc3Auc2VjdGlnby5jb20wPQYDVR0RBDYwNIIydXNlcnRydXN0ZWNjY2VydGlm\n" +
+ "aWNhdGlvbmF1dGhvcml0eS1ldi5jb21vZG9jYS5jb20wggF/BgorBgEEAdZ5AgQC\n" +
+ "BIIBbwSCAWsBaQB2AO5Lvbd1zmC64UJpH6vhnmajD35fsHLYgwDEe4l6qP3LAAAB\n" +
+ "bYL/SJoAAAQDAEcwRQIhAL7EJt/Rgz6NBnx2v8Hevux3Gpcxy64kaeyLVgFeNqFk\n" +
+ "AiBRf+OWLOtZzEav/oERljrk8hgZB4CR1nj/Tn98cmRrwwB2AFWB1MIWkDYBSuoL\n" +
+ "m1c8U/DA5Dh4cCUIFy+jqh0HE9MMAAABbYL/SIgAAAQDAEcwRQIgVtZZaiBMC2lu\n" +
+ "atBzUHQmOq4qrUQP7nS83cd3VzPhToECIQDnlpOCdaxJwr8C0MtkvYpKSabwBPFL\n" +
+ "ASEkwmOpjuQErAB3ALvZ37wfinG1k5Qjl6qSe0c4V5UKq1LoGpCWZDaOHtGFAAAB\n" +
+ "bYL/SJoAAAQDAEgwRgIhAI8OgzP/kzF1bOJRHU2S/ewij/6HpGPy7Mbm7Hyuv3IU\n" +
+ "AiEAxDmX2FmORlgeerQmQ+ar3D9/TwA9RQckVDu5IrgweREwCgYIKoZIzj0EAwID\n" +
+ "SAAwRQIhAPwQWGWd3oR7YJ7ngCDQ9TAbdPgND51SiR34WfEgaTQtAiAxD4umKm02\n" +
+ "59GEMj5NpyF2ZQEq5mEGcjJNojrn+PC4zg==\n" +
+ "-----END CERTIFICATE-----";
+
+ // Owner: CN=usertrustecccertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=Sectigo Limited,
+ // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
// ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
// OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
// Issuer: CN=USERTrust ECC Extended Validation Secure Server CA, O=The USERTRUST Network, L=Jersey City,
// ST=New Jersey, C=US
// Serial number: ab1455f9833ae7783f95de8744181f6a
// Valid from: Wed Nov 28 16:00:00 PST 2018 until: Fri Feb 26 15:59:59 PST 2021
- private static final String VALID = "-----BEGIN CERTIFICATE-----\n" +
+ private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
"MIIGhjCCBiygAwIBAgIRAKsUVfmDOud4P5Xeh0QYH2owCgYIKoZIzj0EAwIwgZUx\n" +
"CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz\n" +
"ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMTswOQYDVQQD\n" +
@@ -607,60 +721,14 @@
"11EPtBSCEhUCIBcyI0yl5dRff6+4x8IeCrLiAOYsfzM7Y/a5uRKFnbYz\n" +
"-----END CERTIFICATE-----";
- // Owner: CN=usertrustecccertificationauthority-ev.comodoca.com, OU=COMODO EV SGC SSL, O=COMODO CA Limited,
- // STREET="3rd Floor, 26 Office Village", STREET=Exchange Quay, STREET=Trafford Road, L=Salford,
- // ST=Greater Manchester, OID.2.5.4.17=M5 3EQ, C=GB, OID.2.5.4.15=Private Organization,
- // OID.1.3.6.1.4.1.311.60.2.1.3=GB, SERIALNUMBER=04058690
- // Issuer: CN=USERTrust ECC Extended Validation Secure Server CA, O=The USERTRUST Network, L=Jersey City,
- // ST=New Jersey, C=US
- // Serial number: 9bd0c93cac9ca2edc1a7dd923316b3c6
- // Valid from: Mon Jul 03 17:00:00 PDT 2017 until: Thu Oct 03 16:59:59 PDT 2019
- private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" +
- "MIIGhzCCBi2gAwIBAgIRAJvQyTysnKLtwafdkjMWs8YwCgYIKoZIzj0EAwIwgZUx\n" +
- "CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz\n" +
- "ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMTswOQYDVQQD\n" +
- "EzJVU0VSVHJ1c3QgRUNDIEV4dGVuZGVkIFZhbGlkYXRpb24gU2VjdXJlIFNlcnZl\n" +
- "ciBDQTAeFw0xNzA3MDQwMDAwMDBaFw0xOTEwMDMyMzU5NTlaMIIBYDERMA8GA1UE\n" +
- "BRMIMDQwNTg2OTAxEzARBgsrBgEEAYI3PAIBAxMCR0IxHTAbBgNVBA8TFFByaXZh\n" +
- "dGUgT3JnYW5pemF0aW9uMQswCQYDVQQGEwJHQjEPMA0GA1UEERMGTTUgM0VRMRsw\n" +
- "GQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcTB1NhbGZvcmQxFjAU\n" +
- "BgNVBAkTDVRyYWZmb3JkIFJvYWQxFjAUBgNVBAkTDUV4Y2hhbmdlIFF1YXkxJTAj\n" +
- "BgNVBAkTHDNyZCBGbG9vciwgMjYgT2ZmaWNlIFZpbGxhZ2UxGjAYBgNVBAoTEUNP\n" +
- "TU9ETyBDQSBMaW1pdGVkMRowGAYDVQQLExFDT01PRE8gRVYgU0dDIFNTTDE7MDkG\n" +
- "A1UEAxMydXNlcnRydXN0ZWNjY2VydGlmaWNhdGlvbmF1dGhvcml0eS1ldi5jb21v\n" +
- "ZG9jYS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQtMl8R33ZaWD6H8BW0\n" +
- "+wybBf0+6+L5YYK/eyAVGm6vwjLaQZWlcdFBMKfaP1qTLi0VAabs4baSUkD8wR56\n" +
- "8pVpo4IDjjCCA4owHwYDVR0jBBgwFoAUKpxa+U6hMNpASyvpS/H1nNwC+S4wHQYD\n" +
- "VR0OBBYEFLOtYfOaIfDHZGubtKNELRR6A2srMA4GA1UdDwEB/wQEAwIFgDAMBgNV\n" +
- "HRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBQBgNVHSAE\n" +
- "STBHMDwGDCsGAQQBsjEBAgEFATAsMCoGCCsGAQUFBwIBFh5odHRwczovL2Nwcy50\n" +
- "cnVzdC1wcm92aWRlci5jb20wBwYFZ4EMAQEwXwYDVR0fBFgwVjBUoFKgUIZOaHR0\n" +
- "cDovL2NybC50cnVzdC1wcm92aWRlci5jb20vVVNFUlRydXN0RUNDRXh0ZW5kZWRW\n" +
- "YWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3JsMIGYBggrBgEFBQcBAQSBizCBiDBa\n" +
- "BggrBgEFBQcwAoZOaHR0cDovL2NydC50cnVzdC1wcm92aWRlci5jb20vVVNFUlRy\n" +
- "dXN0RUNDRXh0ZW5kZWRWYWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3J0MCoGCCsG\n" +
- "AQUFBzABhh5odHRwOi8vb2NzcC50cnVzdC1wcm92aWRlci5jb20wPQYDVR0RBDYw\n" +
- "NIIydXNlcnRydXN0ZWNjY2VydGlmaWNhdGlvbmF1dGhvcml0eS1ldi5jb21vZG9j\n" +
- "YS5jb20wggF8BgorBgEEAdZ5AgQCBIIBbASCAWgBZgB1AKS5CZC0GFgUh7sTosxn\n" +
- "cAo8NZgE+RvfuON3zQ7IDdwQAAABXQ0/jQ0AAAQDAEYwRAIgPbaNWgoi6OfyNwL2\n" +
- "+jiySsoLrkx+0d4NJE1WnZQcfzwCICW4yvsXaMxoOXpQp3EPgrYk5Ajfvy/dY3Ui\n" +
- "0/dbQtHxAHYAVhQGmi/XwuzT9eG9RLI+x0Z2ubyZEVzA75SYVdaJ0N0AAAFdDT+K\n" +
- "xwAABAMARzBFAiB3GQasrX+akoHX02ZvXCcvhWCqv6qQOhLCUqflPoRbuAIhALwe\n" +
- "hrQo8S1Tm5vbMcxGiViq5ZcawxENWhxZ9hS0BZweAHUA7ku9t3XOYLrhQmkfq+Ge\n" +
- "ZqMPfl+wctiDAMR7iXqo/csAAAFdDT+M4AAABAMARjBEAiAjvp8w/fdTVW1VGE0T\n" +
- "I0YcCIXTYFDgzUMsEUiKHANAgwIgETQUcac7Hiis2fgQ+GdGF9yuh+xMo2Z8QXNu\n" +
- "1Cknf+8wCgYIKoZIzj0EAwIDSAAwRQIgQ5UiUI7xodmmMYNs3CmqlZHw/04BQRAR\n" +
- "4gRm7blZSIMCIQDHvIWTaPzSO6vwVzs6wSD6FqebLiFxoddC6aZG8Nm0wQ==\n" +
- "-----END CERTIFICATE-----";
-
public void runTest(ValidatePathWithParams pathValidator) throws Exception {
// Validate valid
- pathValidator.validate(new String[]{VALID, INT},
+ pathValidator.validate(new String[]{VALID, INT_VALID},
ValidatePathWithParams.Status.GOOD, null, System.out);
// Validate Revoked
- pathValidator.validate(new String[]{REVOKED, INT},
+ pathValidator.validate(new String[]{REVOKED, INT_REVOKED},
ValidatePathWithParams.Status.REVOKED,
- "Thu Nov 29 10:06:00 PST 2018", System.out);
+ "Wed Oct 02 06:06:50 PDT 2019", System.out);
}
}
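
Each certificate constant in these CA interop tests is preceded by Owner / Issuer / Serial number / Valid from comments describing the encoded certificate. Those fields can be regenerated from the PEM text itself with the standard CertificateFactory API; the sketch below is illustrative only (the class name and file-path argument are hypothetical and not part of this changeset).

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;

public class DescribePem {
    // Prints the same fields used in the test comments: subject, issuer,
    // serial number (hex) and the validity window of a PEM-encoded certificate.
    public static void main(String[] args) throws Exception {
        String pem = Files.readString(Path.of(args[0]));
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate cert = (X509Certificate) cf.generateCertificate(
                new ByteArrayInputStream(pem.getBytes(StandardCharsets.US_ASCII)));
        System.out.println("Owner: " + cert.getSubjectX500Principal());
        System.out.println("Issuer: " + cert.getIssuerX500Principal());
        System.out.println("Serial number: " + cert.getSerialNumber().toString(16));
        System.out.println("Valid from: " + cert.getNotBefore() + " until: " + cert.getNotAfter());
    }
}
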
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/net/www/http/HttpClient/B8209178.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8209178
+ * @modules java.base/sun.net.www java.base/sun.security.x509 java.base/sun.security.tools.keytool
+ * @library /test/lib
+ * @run main/othervm -Dsun.net.http.retryPost=true B8209178
+ * @run main/othervm -Dsun.net.http.retryPost=false B8209178
+ * @summary Proxied HttpsURLConnection doesn't send BODY when retrying POST request
+ */
+
+import java.io.*;
+import java.net.*;
+import java.nio.charset.StandardCharsets;
+import java.security.KeyStore;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.X509Certificate;
+import java.util.HashMap;
+import javax.net.ssl.*;
+
+import com.sun.net.httpserver.*;
+import jdk.test.lib.net.URIBuilder;
+import sun.security.tools.keytool.CertAndKeyGen;
+import sun.security.x509.X500Name;
+
+public class B8209178 {
+ static {
+ try {
+ HttpsURLConnection.setDefaultHostnameVerifier((hostname, session) -> true);
+ SSLContext.setDefault(new TestSSLContext().get());
+ } catch (Exception ex) {
+ throw new ExceptionInInitializerError(ex);
+ }
+ }
+
+ static final String RESPONSE = "<html><body><p>Hello World!</body></html>";
+ static final String PATH = "/foo/";
+ static final String RETRYPOST = System.getProperty("sun.net.http.retryPost");
+
+ static HttpServer createHttpsServer() throws IOException, NoSuchAlgorithmException {
+ HttpsServer server = HttpsServer.create();
+ HttpContext context = server.createContext(PATH);
+ context.setHandler(new HttpHandler() {
+
+ boolean simulateError = true;
+
+ @Override
+ public void handle(HttpExchange he) throws IOException {
+
+ System.out.printf("%s - received request on : %s%n",
+ Thread.currentThread().getName(),
+ he.getRequestURI());
+ System.out.printf("%s - received request headers : %s%n",
+ Thread.currentThread().getName(),
+ new HashMap(he.getRequestHeaders()));
+
+ InputStream requestBody = he.getRequestBody();
+ String body = B8209178.toString(requestBody);
+
+ System.out.printf("%s - received request body : %s%n",
+ Thread.currentThread().getName(), body);
+
+ if (simulateError) {
+ simulateError = false;
+
+ System.out.printf("%s - closing connection unexpectedly ... %n",
+                            Thread.currentThread().getName());
+
+                    he.close(); // do not send any response the first time ...
+ return;
+ }
+
+ he.getResponseHeaders().add("encoding", "UTF-8");
+ he.sendResponseHeaders(200, RESPONSE.length());
+ he.getResponseBody().write(RESPONSE.getBytes(StandardCharsets.UTF_8));
+ he.close();
+ }
+ });
+
+ server.setHttpsConfigurator(new Configurator(SSLContext.getDefault()));
+ server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+ return server;
+ }
+
+ public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
+ HttpServer server = createHttpsServer();
+ server.start();
+ try {
+ new B8209178().test(server);
+
+ } finally {
+ server.stop(0);
+ System.out.println("Server stopped");
+ }
+ }
+
+ public void test(HttpServer server /*, HttpClient.Version version*/) throws IOException {
+ System.out.println("System property retryPost: " + RETRYPOST);
+ System.out.println("Server is: " + server.getAddress());
+ System.out.println("Verifying communication with server");
+ URI uri = URIBuilder.newBuilder()
+ .scheme("https")
+ .host(server.getAddress().getAddress())
+ .port(server.getAddress().getPort())
+ .path(PATH + "x")
+ .buildUnchecked();
+
+ TunnelingProxy proxy = new TunnelingProxy(server);
+ proxy.start();
+
+ try {
+ System.out.println("Proxy started");
+ Proxy p = new Proxy(Proxy.Type.HTTP,
+ InetSocketAddress.createUnresolved("localhost", proxy.getAddress().getPort()));
+ System.out.println("Verifying communication with proxy");
+
+ callHttpsServerThroughProxy(uri, p);
+
+ } finally {
+ System.out.println("Stopping proxy");
+ proxy.stop();
+ System.out.println("Proxy stopped");
+ }
+ }
+
+ private void callHttpsServerThroughProxy(URI uri, Proxy p) throws IOException {
+ HttpsURLConnection urlConnection = (HttpsURLConnection) uri.toURL().openConnection(p);
+
+ urlConnection.setConnectTimeout(1000);
+ urlConnection.setReadTimeout(3000);
+ urlConnection.setDoInput(true);
+ urlConnection.setDoOutput(true);
+ urlConnection.setRequestMethod("POST");
+ urlConnection.setUseCaches(false);
+
+ urlConnection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
+ urlConnection.setRequestProperty("charset", "utf-8");
+ urlConnection.setRequestProperty("Connection", "keep-alive");
+
+        String urlParameters = "param1=a&param2=b&param3=c";
+ byte[] postData = urlParameters.getBytes(StandardCharsets.UTF_8);
+
+ OutputStream outputStream = urlConnection.getOutputStream();
+ outputStream.write(postData);
+ outputStream.close();
+
+ int responseCode;
+
+ try {
+ responseCode = urlConnection.getResponseCode();
+ System.out.printf(" ResponseCode : %s%n", responseCode);
+ String output;
+ InputStream inputStream = (responseCode < 400) ? urlConnection.getInputStream() : urlConnection.getErrorStream();
+ output = toString(inputStream);
+ inputStream.close();
+ System.out.printf(" Output from server : %s%n", output);
+
+ if (responseCode == 200) { // OK !
+ } else {
+ throw new RuntimeException("Bad response Code : " + responseCode);
+ }
+ } catch (SocketException se) {
+ if (RETRYPOST.equals("true")) { // Should not get here with the fix
+ throw new RuntimeException("Unexpected Socket Exception: " + se);
+ } else {
+ System.out.println("Socket Exception received as expected: " + se);
+ }
+ }
+ }
+
+ static class TunnelingProxy {
+ final Thread accept;
+ final ServerSocket ss;
+ final boolean DEBUG = false;
+ final HttpServer serverImpl;
+
+ TunnelingProxy(HttpServer serverImpl) throws IOException {
+ this.serverImpl = serverImpl;
+ ss = new ServerSocket();
+ accept = new Thread(this::accept);
+ }
+
+ void start() throws IOException {
+ ss.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ accept.start();
+ }
+
+ // Pipe the input stream to the output stream
+ private synchronized Thread pipe(InputStream is, OutputStream os, char tag) {
+ return new Thread("TunnelPipe(" + tag + ")") {
+ @Override
+ public void run() {
+ try {
+ try {
+ int c;
+ while ((c = is.read()) != -1) {
+ os.write(c);
+ os.flush();
+ // if DEBUG prints a + or a - for each transferred
+ // character.
+ if (DEBUG) System.out.print(tag);
+ }
+ is.close();
+ } finally {
+ os.close();
+ }
+ } catch (IOException ex) {
+ if (DEBUG) ex.printStackTrace(System.out);
+ }
+ }
+ };
+ }
+
+ public InetSocketAddress getAddress() {
+ return new InetSocketAddress(ss.getInetAddress(), ss.getLocalPort());
+ }
+
+ // This is a bit shaky. It doesn't handle continuation
+ // lines, but our client shouldn't send any.
+ // Read a line from the input stream, swallowing the final
+ // \r\n sequence. Stops at the first \n, doesn't complain
+ // if it wasn't preceded by '\r'.
+ //
+ String readLine(InputStream r) throws IOException {
+ StringBuilder b = new StringBuilder();
+ int c;
+ while ((c = r.read()) != -1) {
+ if (c == '\n') {
+ break;
+ }
+ b.appendCodePoint(c);
+ }
+ if (b.codePointAt(b.length() - 1) == '\r') {
+ b.delete(b.length() - 1, b.length());
+ }
+ return b.toString();
+ }
+
+ public void accept() {
+ Socket clientConnection = null;
+ try {
+ while (true) {
+ System.out.println("Tunnel: Waiting for client");
+ Socket previous = clientConnection;
+ try {
+ clientConnection = ss.accept();
+ } catch (IOException io) {
+ if (DEBUG) io.printStackTrace(System.out);
+ break;
+ } finally {
+ // we have only 1 client at a time, so it is safe
+ // to close the previous connection here
+ if (previous != null) previous.close();
+ }
+ System.out.println("Tunnel: Client accepted");
+ Socket targetConnection = null;
+ InputStream ccis = clientConnection.getInputStream();
+ OutputStream ccos = clientConnection.getOutputStream();
+ Writer w = new OutputStreamWriter(ccos, "UTF-8");
+ PrintWriter pw = new PrintWriter(w);
+ System.out.println("Tunnel: Reading request line");
+ String requestLine = readLine(ccis);
+ System.out.println("Tunnel: Request status line: " + requestLine);
+ if (requestLine.startsWith("CONNECT ")) {
+ // We should probably check that the next word following
+ // CONNECT is the host:port of our HTTPS serverImpl.
+ // Some improvement for a followup!
+
+ // Read all headers until we find the empty line that
+ // signals the end of all headers.
+ while (!requestLine.equals("")) {
+ System.out.println("Tunnel: Reading header: "
+ + (requestLine = readLine(ccis)));
+ }
+
+ // Open target connection
+ targetConnection = new Socket(
+ serverImpl.getAddress().getAddress(),
+ serverImpl.getAddress().getPort());
+
+ // Then send the 200 OK response to the client
+ System.out.println("Tunnel: Sending "
+ + "HTTP/1.1 200 OK\r\n\r\n");
+ pw.print("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n");
+ pw.flush();
+ } else {
+ // This should not happen
+ throw new IOException("Tunnel: Unexpected status line: "
+ + requestLine);
+ }
+
+ // Pipe the input stream of the client connection to the
+ // output stream of the target connection and conversely.
+ // Now the client and target will just talk to each other.
+ System.out.println("Tunnel: Starting tunnel pipes");
+ Thread t1 = pipe(ccis, targetConnection.getOutputStream(), '+');
+ Thread t2 = pipe(targetConnection.getInputStream(), ccos, '-');
+ t1.start();
+ t2.start();
+
+ // We have only 1 client... wait until it has finished before
+ // accepting a new connection request.
+ System.out.println("Tunnel: Waiting for pipes to close");
+ t1.join();
+ t2.join();
+ System.out.println("Tunnel: Done - waiting for next client");
+ }
+ } catch (Throwable ex) {
+ try {
+ ss.close();
+ } catch (IOException ex1) {
+ ex.addSuppressed(ex1);
+ }
+ ex.printStackTrace(System.err);
+ }
+ }
+
+ void stop() throws IOException {
+ ss.close();
+ }
+ }
+
+ static class Configurator extends HttpsConfigurator {
+ public Configurator(SSLContext ctx) {
+ super(ctx);
+ }
+
+ @Override
+ public void configure(HttpsParameters params) {
+ params.setSSLParameters(getSSLContext().getSupportedSSLParameters());
+ }
+ }
+
+
+ static class TestSSLContext {
+
+ SSLContext ssl;
+
+ public TestSSLContext() throws Exception {
+ init();
+ }
+
+ private void init() throws Exception {
+
+ CertAndKeyGen keyGen = new CertAndKeyGen("RSA", "SHA1WithRSA", null);
+ keyGen.generate(1024);
+
+ //Generate self signed certificate
+ X509Certificate[] chain = new X509Certificate[1];
+ chain[0] = keyGen.getSelfCertificate(new X500Name("CN=ROOT"), (long) 365 * 24 * 3600);
+
+ char[] passphrase = "passphrase".toCharArray();
+
+ KeyStore ks = KeyStore.getInstance("JKS");
+ ks.load(null, passphrase); // must be "initialized" ...
+
+ ks.setKeyEntry("server", keyGen.getPrivateKey(), passphrase, chain);
+
+ KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
+ kmf.init(ks, passphrase);
+
+ TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
+ tmf.init(ks);
+
+ ssl = SSLContext.getInstance("TLS");
+ ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
+ }
+
+ public SSLContext get() {
+ return ssl;
+ }
+ }
+
+ // ###############################################################################################
+
+ private static String toString(InputStream inputStream) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
+ int i = bufferedReader.read();
+ while (i != -1) {
+ sb.append((char) i);
+ i = bufferedReader.read();
+ }
+ bufferedReader.close();
+ return sb.toString();
+ }
+}
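
The TunnelingProxy above implements the proxy side of the HTTP CONNECT handshake that HttpsURLConnection performs before layering TLS over a proxied connection. For orientation, the client side of that exchange looks roughly like the sketch below; it is illustrative only, the test relies on HttpsURLConnection doing this internally, and the class and method names here are hypothetical.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

public class ConnectClientSketch {
    // Client side of the CONNECT exchange that the TunnelingProxy answers:
    // send "CONNECT host:port HTTP/1.1", read headers up to the blank line,
    // and require a 200 status before using the socket as a raw tunnel.
    static Socket openTunnel(String proxyHost, int proxyPort,
                             String host, int port) throws IOException {
        Socket s = new Socket(proxyHost, proxyPort);
        OutputStream out = s.getOutputStream();
        out.write(("CONNECT " + host + ":" + port + " HTTP/1.1\r\n"
                + "Host: " + host + ":" + port + "\r\n\r\n")
                .getBytes(StandardCharsets.US_ASCII));
        out.flush();
        InputStream in = s.getInputStream();
        String statusLine = readLine(in);
        while (!readLine(in).isEmpty()) {
            // discard the remaining response headers
        }
        if (!statusLine.startsWith("HTTP/1.1 200")) {
            s.close();
            throw new IOException("CONNECT failed: " + statusLine);
        }
        return s; // TLS would be layered on top of this socket
    }

    private static String readLine(InputStream in) throws IOException {
        StringBuilder b = new StringBuilder();
        int c;
        while ((c = in.read()) != -1 && c != '\n') {
            if (c != '\r') b.append((char) c);
        }
        return b.toString();
    }
}
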
--- a/test/jdk/sun/net/www/http/KeepAliveCache/KeepAliveTimerThread.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/net/www/http/KeepAliveCache/KeepAliveTimerThread.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
import java.net.*;
import java.io.*;
import jdk.test.lib.net.URIBuilder;
+import static java.net.Proxy.NO_PROXY;
public class KeepAliveTimerThread {
static class Fetcher implements Runnable {
@@ -44,7 +45,7 @@
public void run() {
try {
- InputStream in = url.openConnection().getInputStream();
+ InputStream in = url.openConnection(NO_PROXY).getInputStream();
byte b[] = new byte[128];
int n;
do {
--- a/test/jdk/sun/net/www/protocol/http/UserAuth.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/net/www/protocol/http/UserAuth.java Fri Oct 11 12:08:01 2019 +0530
@@ -35,7 +35,7 @@
import java.io.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
-
+import static java.net.Proxy.NO_PROXY;
public class UserAuth
{
@@ -61,7 +61,7 @@
// GET Request
URL url = new URL("http://" + address.getHostName() + ":" + address.getPort() + "/redirect/");
- HttpURLConnection uc = (HttpURLConnection)url.openConnection();
+ HttpURLConnection uc = (HttpURLConnection)url.openConnection(NO_PROXY);
uc.setRequestProperty("Authorization", "testString:ValueDoesNotMatter");
int resp = uc.getResponseCode();
--- a/test/jdk/sun/net/www/protocol/http/UserCookie.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/net/www/protocol/http/UserCookie.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,6 +33,7 @@
import com.sun.net.httpserver.*;
import java.util.*;
import java.io.*;
+import static java.net.Proxy.NO_PROXY;
public class UserCookie
{
@@ -59,7 +60,7 @@
InetSocketAddress address = httpServer.getAddress();
URL url = new URL("http://" + address.getHostName() + ":" + address.getPort() + "/test/");
- HttpURLConnection uc = (HttpURLConnection)url.openConnection();
+ HttpURLConnection uc = (HttpURLConnection)url.openConnection(NO_PROXY);
uc.setRequestProperty("Cookie", "value=ValueDoesNotMatter");
int resp = uc.getResponseCode();
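
The three test fixes above (KeepAliveTimerThread, UserAuth and UserCookie) all switch to url.openConnection(NO_PROXY), which forces a direct connection so that a proxy configured in the test environment cannot intercept the loopback request. A minimal sketch of the pattern, with a placeholder loopback URL:

import java.net.HttpURLConnection;
import java.net.Proxy;
import java.net.URL;

public class NoProxySketch {
    // The URL and port are placeholders; the point is the NO_PROXY argument,
    // which bypasses any -Dhttp.proxyHost setting or default ProxySelector.
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://127.0.0.1:8080/test/");
        HttpURLConnection uc = (HttpURLConnection) url.openConnection(Proxy.NO_PROXY);
        System.out.println("Response code: " + uc.getResponseCode());
    }
}
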
--- a/test/jdk/sun/security/pkcs11/Cipher/TestGCMKeyAndIvCheck.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/security/pkcs11/Cipher/TestGCMKeyAndIvCheck.java Fri Oct 11 12:08:01 2019 +0530
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8080462
+ * @bug 8080462 8229243
* @library /test/lib ..
* @modules jdk.crypto.cryptoki
* @run main TestGCMKeyAndIvCheck
@@ -81,6 +81,7 @@
", no support for " + mode);
return;
}
+ System.out.println("Testing against " + p.getName());
SecretKey key = new SecretKeySpec(new byte[16], "AES");
// First try parameter-less init.
c.init(Cipher.ENCRYPT_MODE, key);
@@ -111,12 +112,11 @@
throw new Exception("Parameters contains incorrect IV value");
}
- // Should be ok to use the same key+iv for decryption
c.init(Cipher.DECRYPT_MODE, key, params);
c.updateAAD(AAD);
byte[] recovered = c.doFinal(ctPlusTag);
if (!Arrays.equals(recovered, PT)) {
- throw new Exception("decryption result mismatch");
+ throw new Exception("Decryption result mismatch");
}
// Now try to encrypt again using the same key+iv; should fail also
@@ -125,6 +125,7 @@
throw new Exception("Should throw exception when same key+iv is used");
} catch (InvalidAlgorithmParameterException iape) {
// expected
+ System.out.println("Expected IAPE thrown");
}
// Now try to encrypt again using parameter-less init; should work
@@ -138,7 +139,8 @@
}
// Now try to encrypt again using a different parameter; should work
- AlgorithmParameterSpec spec2 = new GCMParameterSpec(128, new byte[30]);
+ AlgorithmParameterSpec spec2 = new GCMParameterSpec(128,
+ "Solaris PKCS11 lib does not allow all-zero IV".getBytes());
c.init(Cipher.ENCRYPT_MODE, key, spec2);
c.updateAAD(AAD);
c.doFinal(PT);
@@ -154,7 +156,7 @@
c.updateAAD(AAD);
recovered = c.doFinal(ctPlusTag);
if (!Arrays.equals(recovered, PT)) {
- throw new Exception("decryption result mismatch");
+ throw new Exception("Decryption result mismatch");
}
// Now try decryption again and re-init using the same parameters
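
TestGCMKeyAndIvCheck exercises the GCM rule that a key/IV pair must not be reused for encryption: re-initialising the cipher in ENCRYPT_MODE with the same key and parameters is expected to fail with InvalidAlgorithmParameterException, while a fresh IV makes the same key usable again. The stand-alone sketch below illustrates that behaviour; it uses the default JCE provider rather than the PKCS11 provider under test and assumes the provider enforces the same check (SunJCE does).

import java.security.InvalidAlgorithmParameterException;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class GcmIvReuseSketch {
    public static void main(String[] args) throws Exception {
        SecretKey key = new SecretKeySpec(new byte[16], "AES");
        byte[] iv = new byte[12];
        new SecureRandom().nextBytes(iv);

        Cipher c = Cipher.getInstance("AES/GCM/NoPadding");
        c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
        c.doFinal(new byte[32]);

        try {
            // Re-initialising for encryption with the same key and IV is rejected.
            c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
            throw new AssertionError("same key+IV accepted for encryption");
        } catch (InvalidAlgorithmParameterException expected) {
            System.out.println("Expected IAPE thrown: " + expected.getMessage());
        }

        // A fresh IV makes the same key usable again.
        new SecureRandom().nextBytes(iv);
        c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
        c.doFinal(new byte[32]);
    }
}
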
--- a/test/jdk/sun/security/pkcs11/Cipher/TestKATForGCM.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/security/pkcs11/Cipher/TestKATForGCM.java Fri Oct 11 12:08:01 2019 +0530
@@ -30,12 +30,13 @@
* @summary Known Answer Test for AES cipher with GCM mode support in
* PKCS11 provider.
*/
-import java.security.*;
-import javax.crypto.*;
-import javax.crypto.spec.*;
-import java.math.*;
-
-import java.util.*;
+import java.security.GeneralSecurityException;
+import java.security.Provider;
+import java.util.Arrays;
+import javax.crypto.Cipher;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
public class TestKATForGCM extends PKCS11Test {
@@ -319,15 +320,21 @@
System.out.println("Test Passed!");
}
} catch (Exception e) {
- double ver = getNSSInfo("nss");
- if (ver < 3.251d && p.getName().contains("SunPKCS11-NSS") &&
- System.getProperty("os.name").equals("SunOS")) {
- // buggy behaviour from solaris on 11.2 OS (nss < 3.251)
- System.out.println("Skipping: SunPKCS11-NSS: Old NSS: " + ver);
- return; // OK
- } else {
- throw e;
+            System.out.println("Exception occurred using " + p.getName() + " version " + p.getVersionStr());
+
+ if (isNSS(p)) {
+ double ver = getNSSInfo("nss");
+ String osName = System.getProperty("os.name");
+ if (ver < 3.251d && osName.equals("SunOS")) {
+ // buggy behaviour from solaris on 11.2 OS (nss < 3.251)
+ System.out.println("Skipping: SunPKCS11-NSS: Old NSS: " + ver);
+ return; // OK
+ } else if (ver > 3.139 && ver < 3.15 && osName.equals("Linux")) {
+ // warn about buggy behaviour on Linux with nss 3.14
+                    System.out.println("Warning: old NSS " + ver + " might be problematic; consider upgrading it");
+ }
}
+ throw e;
}
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8230731 8001227 8231635 8231634 8196969
+ * @requires vm.hasSAandCanAttach
+ * @library /test/lib
+ * @compile JShellHeapDumpTest.java
+ * @run main/timeout=240 JShellHeapDumpTest nosleep
+ */
--- a/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -39,6 +39,7 @@
import java.util.Map;
import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.hprof.parser.HprofReader;
@@ -47,40 +48,43 @@
public class JShellHeapDumpTest {
- protected static Process process;
-
- private static long pid;
+ static Process jShellProcess;
+ static boolean doSleep = true; // By default do a short sleep when app starts up
public static void launch(String expectedMessage, List<String> toolArgs)
throws IOException {
try {
launchJshell();
+ long jShellPID = jShellProcess.pid();
- System.out.println("Starting " + toolArgs.get(0) + " against " + pid);
+ System.out.println("Starting " + toolArgs.get(0) + " against " + jShellPID);
JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
for (String cmd : toolArgs) {
launcher.addToolArg(cmd);
}
- launcher.addToolArg("--pid=" + Long.toString(pid));
+ launcher.addToolArg("--pid=" + Long.toString(jShellPID));
ProcessBuilder processBuilder = new ProcessBuilder(launcher.getCommand());
- processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
- System.out.println("stdout:");
+ System.out.println("jhsdb jmap stdout:");
System.out.println(output.getStdout());
- System.out.println("stderr:");
+ System.out.println("jhsdb jmap stderr:");
System.out.println(output.getStderr());
- output.shouldNotContain("null");
+ System.out.println("###### End of all output:");
output.shouldHaveExitValue(0);
} catch (Exception ex) {
throw new RuntimeException("Test ERROR " + ex, ex);
} finally {
- if (process.isAlive()) {
- process.destroy();
- }
+ if (jShellProcess.isAlive()) {
+ System.out.println("Destroying jshell");
+ jShellProcess.destroy();
+ System.out.println("Jshell destroyed");
+ } else {
+ System.out.println("Jshell not alive");
+ }
}
}
@@ -102,42 +106,59 @@
}
public static void testHeapDump() throws IOException {
- File dump = new File("jhsdb.jmap.heap." +
+ File hprofFile = new File("jhsdb.jmap.heap." +
System.currentTimeMillis() + ".hprof");
- if (dump.exists()) {
- dump.delete();
+ if (hprofFile.exists()) {
+ hprofFile.delete();
}
launch("heap written to", "jmap",
- "--binaryheap", "--dumpfile=" + dump.getAbsolutePath());
+ "--binaryheap", "--dumpfile=" + hprofFile.getAbsolutePath());
+
+ assertTrue(hprofFile.exists() && hprofFile.isFile(),
+ "Could not create dump file " + hprofFile.getAbsolutePath());
- assertTrue(dump.exists() && dump.isFile(),
- "Could not create dump file " + dump.getAbsolutePath());
+ printStackTraces(hprofFile.getAbsolutePath());
- printStackTraces(dump.getAbsolutePath());
-
- dump.delete();
+ System.out.println("hprof file size: " + hprofFile.length());
+ hprofFile.delete();
}
public static void launchJshell() throws IOException {
System.out.println("Starting Jshell");
- String jdkPath = System.getProperty("test.jdk");
- if (jdkPath == null) {
- // we are not under jtreg, try env
- Map<String, String> env = System.getenv();
- jdkPath = env.get("TESTJAVA");
+ long startTime = System.currentTimeMillis();
+ try {
+ ProcessBuilder pb = new ProcessBuilder(JDKToolFinder.getTestJDKTool("jshell"));
+ jShellProcess = ProcessTools.startProcess("JShell", pb,
+ s -> { // warm-up predicate
+ return s.contains("Welcome to JShell");
+ });
+ } catch (Exception ex) {
+ throw new RuntimeException("Test ERROR " + ex, ex);
}
- if (jdkPath == null) {
- throw new RuntimeException("Can't determine jdk path neither test.jdk property no TESTJAVA env are set");
+
+ long elapsedTime = System.currentTimeMillis() - startTime;
+ System.out.println("Jshell Started in " + elapsedTime + "ms");
+
+ // Give jshell a chance to fully start up. This makes SA more stable for the jmap dump.
+ try {
+ if (doSleep) {
+ Thread.sleep(2000);
+ }
+ } catch (Exception e) {
}
- String osname = System.getProperty("os.name");
- String jshell = jdkPath + ((osname.startsWith("window")) ? "/bin/jshell.exe" : "/bin/jshell");
- process = Runtime.getRuntime().exec(jshell);
- pid = process.pid();
}
public static void main(String[] args) throws Exception {
-
+ if (args.length == 1) {
+ if (args[0].equals("nosleep")) {
+ doSleep = false;
+ } else {
+ throw new RuntimeException("Invalid arg: " + args[0]);
+ }
+ } else if (args.length != 0) {
+ throw new RuntimeException("Too many args: " + args.length);
+ }
testHeapDump();
// The test throws RuntimeException on error.
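
JShellHeapDumpTest now launches jshell through ProcessTools.startProcess with a warm-up predicate that waits for the "Welcome to JShell" banner before the pid is handed to jhsdb. The same pattern can be sketched with only standard APIs; the helper below is illustrative and not part of the test library.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class WarmupLaunchSketch {
    // Starts a child process and blocks until it prints a line containing the
    // warm-up marker, then keeps draining its output in a daemon thread so the
    // child cannot block on a full pipe.
    static Process startAndWaitFor(ProcessBuilder pb, String marker) throws IOException {
        pb.redirectErrorStream(true);
        Process p = pb.start();
        BufferedReader r = new BufferedReader(
                new InputStreamReader(p.getInputStream(), StandardCharsets.UTF_8));
        String line;
        while ((line = r.readLine()) != null) {
            System.out.println("child> " + line);
            if (line.contains(marker)) {
                Thread drain = new Thread(() -> r.lines().forEach(
                        l -> System.out.println("child> " + l)));
                drain.setDaemon(true);
                drain.start();
                return p; // warmed up; the caller can now attach tools to p.pid()
            }
        }
        throw new IOException("process ended before printing: " + marker);
    }
}
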
--- a/test/jtreg-ext/requires/VMProps.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/jtreg-ext/requires/VMProps.java Fri Oct 11 12:08:01 2019 +0530
@@ -235,7 +235,22 @@
*/
protected String vmJvmci() {
// builds with jvmci have this flag
- return "" + (WB.getBooleanVMFlag("EnableJVMCI") != null);
+ if (WB.getBooleanVMFlag("EnableJVMCI") == null) {
+ return "false";
+ }
+
+ switch (GC.selected()) {
+ case Serial:
+ case Parallel:
+ case G1:
+ // These GCs are supported with JVMCI
+ return "true";
+ default:
+ break;
+ }
+
+ // Every other GC is not supported
+ return "false";
}
/**
@@ -290,6 +305,7 @@
*/
protected void vmOptFinalFlags(SafeMap map) {
vmOptFinalFlag(map, "ClassUnloading");
+ vmOptFinalFlag(map, "ClassUnloadingWithConcurrentMark");
vmOptFinalFlag(map, "UseCompressedOops");
vmOptFinalFlag(map, "EnableJVMCI");
vmOptFinalFlag(map, "EliminateAllocations");
@@ -356,7 +372,24 @@
} else {
jaotc = bin.resolve("jaotc");
}
- return "" + Files.exists(jaotc);
+
+ if (!Files.exists(jaotc)) {
+ // No jaotc => no AOT
+ return "false";
+ }
+
+ switch (GC.selected()) {
+ case Serial:
+ case Parallel:
+ case G1:
+ // These GCs are supported with AOT
+ return "true";
+ default:
+ break;
+ }
+
+ // Every other GC is not supported
+ return "false";
}
/*
--- a/test/langtools/jdk/javadoc/doclet/testMemberInheritance/TestMemberInheritance.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/langtools/jdk/javadoc/doclet/testMemberInheritance/TestMemberInheritance.java Fri Oct 11 12:08:01 2019 +0530
@@ -24,7 +24,7 @@
/*
* @test
* @bug 4638588 4635809 6256068 6270645 8025633 8026567 8162363 8175200
- * 8192850 8182765
+ * 8192850 8182765 8220217
* @summary Test to make sure that members are inherited properly in the Javadoc.
* Verify that inheritance labels are correct.
* @author jamieh
@@ -47,7 +47,7 @@
public void test() {
javadoc("-d", "out",
"-sourcepath", testSrc,
- "pkg", "diamond", "inheritDist", "pkg1");
+ "pkg", "diamond", "inheritDist", "pkg1", "pkg2");
checkExit(Exit.OK);
checkOutput("pkg/SubClass.html", true,
@@ -104,5 +104,27 @@
+ "<code><a href=\"Interface.html#between(java.time.chrono.ChronoLocalDate"
+ ",java.time.chrono.ChronoLocalDate)\">between</a></code>"
);
+
+ checkOutput("pkg2/DocumentedNonGenericChild.html", true,
+ "<section class=\"description\">\n<hr>\n"
+ + "<pre>public abstract class <span class=\"typeNameLabel\">"
+ + "DocumentedNonGenericChild</span>\n"
+ + "extends java.lang.Object</pre>\n"
+ + "</section>");
+
+ checkOutput("pkg2/DocumentedNonGenericChild.html", true,
+ "<td class=\"colFirst\"><code>protected abstract java.lang.String</code></td>\n"
+ + "<th class=\"colSecond\" scope=\"row\"><code><span class=\"memberNameLink\">"
+ + "<a href=\"#parentMethod()\">parentMethod</a></span>()</code></th>\n"
+ + "<td class=\"colLast\">\n"
+ + "<div class=\"block\">Returns some value.</div>\n"
+ + "</td>\n");
+
+ checkOutput("pkg2/DocumentedNonGenericChild.html", true,
+ "<h3><a id=\"parentMethod()\">parentMethod</a></h3>\n"
+ + "<div class=\"memberSignature\"><span class=\"modifiers\">protected abstract</span>"
+ + " <span class=\"returnType\">java.lang.String</span> "
+ + "<span class=\"memberName\">parentMethod</span>()</div>");
+
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/jdk/javadoc/doclet/testMemberInheritance/pkg2/DocumentedNonGenericChild.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package pkg2;
+
+public abstract class DocumentedNonGenericChild extends UndocumentedGenericParent<String> {
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/jdk/javadoc/doclet/testMemberInheritance/pkg2/UndocumentedGenericParent.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package pkg2;
+
+abstract class UndocumentedGenericParent<T> {
+ /**
+ * Returns some value.
+ *
+ * @return some value
+ */
+ protected abstract String parentMethod();
+}
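
To reproduce the pages the test checks (DocumentedNonGenericChild.html showing the inherited parentMethod), one can run the javadoc tool over pkg2 directly; a hedged sketch using the standard ToolProvider API, with an illustrative source path:

    import java.util.spi.ToolProvider;

    public class RunJavadocOnPkg2 {
        public static void main(String[] args) {
            ToolProvider javadoc = ToolProvider.findFirst("javadoc").orElseThrow();
            // -sourcepath should point at the testMemberInheritance source directory.
            int rc = javadoc.run(System.out, System.err,
                    "-d", "out", "-sourcepath", "path/to/testMemberInheritance", "pkg2");
            System.out.println("javadoc exit code: " + rc);
        }
    }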
--- a/test/langtools/tools/javac/lib/DPrinter.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/langtools/tools/javac/lib/DPrinter.java Fri Oct 11 12:08:01 2019 +0530
@@ -437,7 +437,8 @@
Scope scope = (Scope) getField(e, e.getClass(), "scope");
return "(" + sym.name + ":" + sym
+ ",shdw:" + entryToString(callMethod(e, e.getClass(), "next"), table, true)
- + ",sibl:" + entryToString(getField(e, e.getClass(), "sibling"), table, true)
+ + ",nextSibling:" + entryToString(getField(e, e.getClass(), "nextSibling"), table, true)
+ + ",prevSibling:" + entryToString(getField(e, e.getClass(), "prevSibling"), table, true)
+ ((sym.owner != scope.owner)
? (",BOGUS[" + sym.owner + "," + scope.owner + "]")
: "")
--- a/test/langtools/tools/javac/scope/RemoveSymbolUnitTest.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/langtools/tools/javac/scope/RemoveSymbolUnitTest.java Fri Oct 11 12:08:01 2019 +0530
@@ -33,8 +33,13 @@
import com.sun.tools.javac.util.*;
import com.sun.tools.javac.code.*;
import com.sun.tools.javac.code.Scope.*;
+import com.sun.tools.javac.code.Symbol;
import com.sun.tools.javac.code.Symbol.*;
import com.sun.tools.javac.file.JavacFileManager;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
public class RemoveSymbolUnitTest {
@@ -63,36 +68,66 @@
// Try enter and remove in different shuffled combinations.
// working with fresh scope each time.
- WriteableScope cs = WriteableScope.create(clazz);
- cs.enter(v);
- cs.enter(m);
+ WriteableScope cs = writeableScope(clazz, v, m);
cs.remove(v);
Symbol s = cs.findFirst(hasNext);
if (s != m)
throw new AssertionError("Wrong symbol");
- cs = WriteableScope.create(clazz);
- cs.enter(m);
- cs.enter(v);
+ cs = writeableScope(clazz, m, v);
cs.remove(v);
s = cs.findFirst(hasNext);
if (s != m)
throw new AssertionError("Wrong symbol");
- cs = WriteableScope.create(clazz);
- cs.enter(v);
- cs.enter(m);
+ cs = writeableScope(clazz, v, m);
cs.remove(m);
s = cs.findFirst(hasNext);
if (s != v)
throw new AssertionError("Wrong symbol");
- cs = WriteableScope.create(clazz);
+ cs = writeableScope(clazz);
cs.enter(m);
cs.enter(v);
cs.remove(m);
s = cs.findFirst(hasNext);
if (s != v)
throw new AssertionError("Wrong symbol");
+
+ // Test multiple removals in the same scope.
+ VarSymbol v1 = new VarSymbol(0, names.fromString("name1"), Type.noType, clazz);
+ VarSymbol v2 = new VarSymbol(0, names.fromString("name2"), Type.noType, clazz);
+ VarSymbol v3 = new VarSymbol(0, names.fromString("name3"), Type.noType, clazz);
+ VarSymbol v4 = new VarSymbol(0, names.fromString("name4"), Type.noType, clazz);
+
+ cs = writeableScope(clazz, v1, v2, v3, v4);
+ cs.remove(v2);
+ assertRemainingSymbols(cs, v1, v3, v4);
+ cs.remove(v3);
+ assertRemainingSymbols(cs, v1, v4);
+ cs.remove(v1);
+ assertRemainingSymbols(cs, v4);
+ cs.remove(v4);
+ assertRemainingSymbols(cs);
+ }
+
+ private WriteableScope writeableScope(ClassSymbol classSymbol, Symbol... symbols) {
+ WriteableScope cs = WriteableScope.create(classSymbol);
+ for (Symbol symbol : symbols) {
+ cs.enter(symbol);
+ }
+ return cs;
+ }
+
+ private void assertRemainingSymbols(WriteableScope cs, Symbol... symbols) {
+ List<Symbol> expectedSymbols = Arrays.asList(symbols);
+ List<Symbol> actualSymbols = new ArrayList<>();
+ cs.getSymbols().forEach(symbol -> actualSymbols.add(symbol));
+ // The symbols are stored in reverse order
+ Collections.reverse(actualSymbols);
+ if (!actualSymbols.equals(expectedSymbols)) {
+ throw new AssertionError(
+ String.format("Wrong symbols: %s. Expected %s", actualSymbols, expectedSymbols));
+ }
}
}
--- a/test/lib/jdk/test/lib/SA/SATestUtils.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/lib/jdk/test/lib/SA/SATestUtils.java Fri Oct 11 12:08:01 2019 +0530
@@ -22,12 +22,18 @@
*/
package jdk.test.lib.SA;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
-import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.zip.GZIPInputStream;
+
import jdk.test.lib.Asserts;
import jdk.test.lib.Platform;
-import java.util.concurrent.TimeUnit;
+import jtreg.SkippedException;
public class SATestUtils {
@@ -77,4 +83,22 @@
outStringList.addAll(cmdStringList);
return outStringList;
}
+
+ public static void unzipCores(File dir) {
+ File[] gzCores = dir.listFiles((directory, name) -> name.matches("core(\\.\\d+)?\\.gz"));
+ for (File gzCore : gzCores) {
+ String coreFileName = gzCore.getName().replace(".gz", "");
+ System.out.println("Unzipping core into " + coreFileName);
+ try (GZIPInputStream gzis = new GZIPInputStream(new FileInputStream(gzCore));
+ FileOutputStream fos = new FileOutputStream(coreFileName)) {
+ byte[] buffer = new byte[1024];
+ int length;
+ while ((length = gzis.read(buffer)) > 0) {
+ fos.write(buffer, 0, length);
+ }
+ } catch (IOException e) {
+ throw new SkippedException("Not able to unzip file: " + gzCore.getAbsolutePath(), e);
+ }
+ }
+ }
}
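
A hypothetical SA test that finds gzipped core files in its scratch directory could use the new helper like this (the directory is illustrative; the SkippedException simply propagates if decompression fails):

    import java.io.File;
    import jdk.test.lib.SA.SATestUtils;

    public class UnzipCoresExample {
        public static void main(String[] args) {
            // Inflates any core.gz / core.<pid>.gz files found in the given directory
            // into files in the current working directory.
            SATestUtils.unzipCores(new File("."));
        }
    }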
--- a/test/lib/sun/hotspot/WhiteBox.java Wed Oct 09 17:06:06 2019 -0700
+++ b/test/lib/sun/hotspot/WhiteBox.java Fri Oct 11 12:08:01 2019 +0530
@@ -511,6 +511,7 @@
// Safepoint Checking
public native void assertMatchingSafepointCalls(boolean mutexSafepointValue, boolean attemptedNoSafepointValue);
+ public native void assertSpecialLock(boolean allowVMBlock, boolean safepointCheck);
// Sharing & archiving
public native String getDefaultArchivePath();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/java/lang/StackWalkBench.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.openjdk.bench.java.lang;
+
+import java.lang.StackWalker.StackFrame;
+import java.util.concurrent.TimeUnit;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+/**
+ * Benchmarks for java.lang.StackWalker
+ */
+@State(value=Scope.Benchmark)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class StackWalkBench {
+ private static final StackWalker WALKER_DEFAULT = StackWalker.getInstance();
+
+ private static final StackWalker WALKER_CLASS =
+ StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE);
+
+ // TestStack will add this number of calls to the call stack
+ @Param({"4", "100", "1000"})
+ // For more thorough testing, consider:
+ // @Param({"4", "10", "100", "256", "1000"})
+ public int depth;
+
+ // Only used by walk_filterCallerClass, to specify (roughly) how far back in
+ // the call stack the target class will be found. Not needed by the other
+ // benchmarks, so not a @Param by default.
+ // @Param({"4"})
+ public int mark = 4;
+
+ /** Build a call stack of a given size, then run trigger code in it.
+ * (Does not account for existing frames higher up in the JMH machinery).
+ */
+ public static class TestStack {
+ final long fence;
+ long current;
+ final Runnable trigger;
+
+ public TestStack(long max, Runnable trigger) {
+ this.fence = max;
+ this.current = 0;
+ this.trigger = trigger;
+ }
+
+ public void start() {
+ one();
+ }
+
+ public void one() {
+ if (check()) {
+ two();
+ }
+ }
+
+ void two() {
+ if (check()) {
+ three();
+ }
+ }
+
+ private void three() {
+ if (check()) {
+ one();
+ }
+ }
+
+ boolean check() {
+ if (++current == fence) {
+ trigger.run();
+ return false;
+ } else {
+ return true;
+ }
+ }
+ }
+
+ /* Class to look for when testing filtering */
+ static class TestMarker {
+ public void call(MarkedTestStack test) {
+ test.marked();
+ }
+ }
+
+ /** Call stack to test filtering.
+ * TestMarker will make a call on the stack.
+ */
+ static class MarkedTestStack extends TestStack {
+ long mark;
+
+ /**
+ * @param mark how far back in the stack the TestMarker should be found
+ */
+ public MarkedTestStack(long max, long mark, Runnable trigger) {
+ super(max, trigger);
+ if (mark > max) {
+ throw new IllegalArgumentException("mark must be <= max");
+ }
+ this.mark = max - mark; // Count backwards from the completed call stack
+ }
+ @Override
+ public void start() {
+ if (mark == 0) {
+ mark();
+ } else {
+ super.one();
+ }
+ }
+ @Override
+ boolean check() {
+ if (++current == mark) {
+ mark();
+ return false;
+ } else if (current == fence) {
+ trigger.run();
+ return false;
+ } else {
+ return true;
+ }
+ }
+ void mark() {
+ new TestMarker().call(this);
+ }
+ public void marked() {
+ if (current < fence) {
+ if (check()) {
+ one();
+ }
+ } else {
+ trigger.run();
+ }
+ }
+ }
+
+ /**
+ * StackWalker.forEach() with default options
+ */
+ @Benchmark
+ public void forEach_DefaultOpts(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ WALKER_DEFAULT.forEach(localBH::consume);
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to fetch class names
+ */
+ @Benchmark
+ public void walk_ClassNames(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ WALKER_DEFAULT.walk(s -> {
+ s.map(StackFrame::getClassName).forEach(localBH::consume);
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to fetch method names
+ */
+ @Benchmark
+ public void walk_MethodNames(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ WALKER_DEFAULT.walk( s -> {
+ s.map(StackFrame::getMethodName).forEach(localBH::consume);
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to fetch declaring class instances
+ */
+ @Benchmark
+ public void walk_DeclaringClass(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ WALKER_CLASS.walk(s -> {
+ s.map(StackFrame::getDeclaringClass).forEach(localBH::consume);
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to fetch StackTraceElements
+ */
+ @Benchmark
+ public void walk_StackTraceElements(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ WALKER_DEFAULT.walk(s -> {
+ s.map(StackFrame::toStackTraceElement).forEach(localBH::consume);
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * StackWalker.getCallerClass()
+ */
+ @Benchmark
+ public void getCallerClass(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ localBH.consume(WALKER_CLASS.getCallerClass());
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to filter the StackFrames, looking for the
+ * TestMarker class, which will be (approximately) 'mark' calls back up the
+ * call stack.
+ */
+ @Benchmark
+ public void walk_filterCallerClass(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+
+ new MarkedTestStack(depth, mark, new Runnable() {
+ public void run() {
+ // To be comparable with Reflection.getCallerClass(), return the Class object
+ WALKER_CLASS.walk(s -> {
+ localBH.consume(s.filter(f -> TestMarker.class.equals(f.getDeclaringClass())).findFirst().get().getDeclaringClass());
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ /**
+ * Use StackWalker.walk() to filter the StackFrames, looking for the
+ * TestMarker class, which will be (approximately) depth/2 calls back up the
+ * call stack.
+ */
+ @Benchmark
+ public void walk_filterCallerClassHalfStack(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+
+ new MarkedTestStack(depth, depth / 2, new Runnable() {
+ public void run() {
+ // To be comparable with Reflection.getCallerClass(), return the Class object
+ WALKER_CLASS.walk(s -> {
+ localBH.consume(s.filter((f) -> TestMarker.class.equals(f.getDeclaringClass())).findFirst().get().getDeclaringClass());
+ return null;
+ });
+ done[0] = true;
+ }
+ }).start();
+
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ // TODO: add swConsumeFramesWithReflection
+ // TODO: add swFilterOutStreamClasses
+
+// // This benchmark is for collecting performance counter data
+// static PerfCounter streamTime = PerfCounter.newPerfCounter("jdk.stackwalk.testStreamsElapsedTime");
+// static PerfCounter numStream = PerfCounter.newPerfCounter("jdk.stackwalk.numTestStreams");
+// // @Benchmark
+// public void swStkFrmsTimed(Blackhole bh) {
+// final Blackhole localBH = bh;
+// final boolean[] done = {false};
+// new TestStack(depth, new Runnable() {
+// public void run() {
+// long t0 = System.nanoTime();
+// WALKER_DEFAULT.forEach(localBH::consume);
+// streamTime.addElapsedTimeFrom(t0);
+// numStream.increment();
+// done[0] = true;
+// }
+// }).start();
+// if (!done[0]) {
+// throw new RuntimeException();
+// }
+// }
+}
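
Outside of JMH, the same walk patterns can be exercised with a few lines; a minimal standalone sketch (class names here are illustrative):

    import java.lang.StackWalker.Option;
    import java.util.List;
    import java.util.stream.Collectors;

    public class StackWalkExample {
        public static void main(String[] args) {
            StackWalker walker = StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE);
            // Collect class#method for every live frame, as the walk_* benchmarks do per frame.
            List<String> frames = walker.walk(s ->
                    s.map(f -> f.getClassName() + "#" + f.getMethodName())
                     .collect(Collectors.toList()));
            frames.forEach(System.out::println);
            // getCallerClass() reports the caller of the method that invokes it,
            // so it is wrapped in a helper; here it reports StackWalkExample.
            System.out.println(Helper.caller());
        }

        static class Helper {
            static Class<?> caller() {
                return StackWalker.getInstance(Option.RETAIN_CLASS_REFERENCE).getCallerClass();
            }
        }
    }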
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/java/math/FpRoundingBenchmark.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,78 @@
+//
+// Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+package org.openjdk.bench.java.math;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@State(Scope.Thread)
+public class FpRoundingBenchmark {
+
+ @Param({"1024"})
+ public int TESTSIZE;
+
+ public double[] DargV1;
+
+ public double[] Res;
+
+ public final double[] DspecialVals = {
+ 0.0, -0.0, Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY};
+
+ @Setup(Level.Trial)
+ public void BmSetup() {
+ int i = 0;
+ Random r = new Random(1024);
+ DargV1 = new double[TESTSIZE];
+ Res = new double[TESTSIZE];
+
+ for (; i < DspecialVals.length; i++) {
+ DargV1[i] = DspecialVals[i];
+ }
+
+ for (; i < TESTSIZE; i++) {
+ DargV1[i] = r.nextDouble()*TESTSIZE;
+ }
+ }
+
+ @Benchmark
+ public void testceil(Blackhole bh) {
+ for (int i = 0; i < TESTSIZE; i++)
+ Res[i] = Math.ceil(DargV1[i]);
+ }
+
+ @Benchmark
+ public void testfloor(Blackhole bh) {
+ for (int i = 0; i < TESTSIZE; i++)
+ Res[i] = Math.floor(DargV1[i]);
+ }
+
+ @Benchmark
+ public void testrint(Blackhole bh) {
+ for (int i = 0; i < TESTSIZE; i++)
+ Res[i] = Math.rint(DargV1[i]);
+ }
+}
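
The special values seeded in BmSetup matter because ceil/floor/rint must preserve NaN, signed zeros and infinities, and rint rounds half-way cases to the nearest even value; a few known results for illustration:

    public class RoundingFacts {
        public static void main(String[] args) {
            System.out.println(Math.rint(2.5));          // 2.0 (round half to even)
            System.out.println(Math.rint(3.5));          // 4.0
            System.out.println(Math.ceil(-0.5));         // -0.0 (sign of zero is preserved)
            System.out.println(Math.floor(Double.NaN));  // NaN passes through
        }
    }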
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/java/security/GetContext.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.openjdk.bench.java.security;
+
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+
+import java.security.AccessControlContext;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Benchmark measuring AccessController.getContext
+ */
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Thread)
+public abstract class GetContext {
+
+ public static class Top extends GetContext {
+
+ @Benchmark
+ public AccessControlContext testNonPriv() {
+ return AccessController.getContext();
+ }
+
+ @Benchmark
+ public AccessControlContext testPriv() {
+ PrivilegedAction<AccessControlContext> pa = () -> AccessController.getContext();
+ return AccessController.doPrivileged(pa);
+ }
+ }
+
+ public static class Deep extends GetContext {
+
+ @Param({"2", "50"})
+ int depth;
+
+ private AccessControlContext recurse(int depth) {
+ if (depth > 0) {
+ return recurse(depth - 1);
+ } else {
+ return AccessController.getContext();
+ }
+ }
+
+ @Benchmark
+ public AccessControlContext testNonPrivRecurse() {
+ return recurse(depth);
+ }
+
+ @Benchmark
+ public AccessControlContext testPrivInline() {
+ PrivilegedAction<AccessControlContext> pa = () -> recurse(depth);
+ return AccessController.doPrivileged(pa);
+ }
+ }
+}
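
For context, AccessController.getContext() snapshots the protection domains of the frames on the current stack, while doPrivileged() effectively truncates that inspection at the privileged frame, which is the cost difference the Deep variants above probe at different depths. A hedged sketch of the two calls side by side:

    import java.security.AccessControlContext;
    import java.security.AccessController;
    import java.security.PrivilegedAction;

    public class GetContextExample {
        public static void main(String[] args) {
            // Capture reflecting the full current stack.
            AccessControlContext full = AccessController.getContext();
            // Capture taken inside a privileged frame.
            AccessControlContext priv = AccessController.doPrivileged(
                    (PrivilegedAction<AccessControlContext>) AccessController::getContext);
            System.out.println(full.equals(priv));
        }
    }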
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/java/util/logging/LoggingRuntimeMicros.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.openjdk.bench.java.util.logging;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.LogRecord;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+@State(value = Scope.Benchmark)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class LoggingRuntimeMicros {
+
+ // TestStack will add this number of calls to the call stack
+ @Param({"4", "100", "1000"})
+ // For more thorough testing, consider:
+ // @Param({"4", "10", "100", "256", "1000"})
+ public int depth;
+
+ /** Logging handler for testing logging calls. */
+ @State(value = Scope.Thread) // create a separate one for each worker thread
+ public static class TestHandler extends java.util.logging.Handler {
+ private final static AtomicInteger serialNum = new AtomicInteger(0);
+
+ private final java.util.logging.Logger logger;
+ private volatile LogRecord record;
+
+ public TestHandler() {
+ // Each instance uses its own logger
+ logger = java.util.logging.Logger.getLogger("LoggingRuntimeMicros" + serialNum.incrementAndGet());
+ logger.setUseParentHandlers(false);
+ logger.addHandler(this);
+ }
+
+ @Override
+ public void publish(LogRecord record) {
+ record.getSourceMethodName();
+ this.record = record;
+ }
+
+ private LogRecord reset() {
+ LogRecord record = this.record;
+ this.record = null;
+ return record;
+ }
+
+ public final LogRecord testInferCaller(String msg) {
+ logger.info(msg);
+ LogRecord rec = this.reset();
+ if (!"testInferCaller".equals(rec.getSourceMethodName())) {
+ throw new RuntimeException("bad caller: "
+ + rec.getSourceClassName() + "."
+ + rec.getSourceMethodName());
+ }
+ return rec;
+ }
+
+ public final LogRecord testLogp(String msg) {
+ logger.logp(java.util.logging.Level.INFO, "foo", "bar", msg);
+ LogRecord rec = this.reset();
+ if (!"bar".equals(rec.getSourceMethodName())) {
+ throw new RuntimeException("bad caller: "
+ + rec.getSourceClassName() + "."
+ + rec.getSourceMethodName());
+ }
+ return rec;
+ }
+ @Override public void flush() {}
+ @Override public void close() throws SecurityException {}
+ }
+
+ /** Build a call stack of a given size, then run trigger code in it.
+ * (Does not account for existing frames higher up in the JMH machinery).
+ */
+ static class TestStack {
+ final long fence;
+ long current;
+ final Runnable trigger;
+
+ TestStack(long max, Runnable trigger) {
+ this.fence = max;
+ this.current = 0;
+ this.trigger = trigger;
+ }
+
+ public void start() {
+ one();
+ }
+
+ public void one() {
+ if (check()) {
+ two();
+ }
+ }
+
+ void two() {
+ if (check()) {
+ three();
+ }
+ }
+
+ private void three() {
+ if (check()) {
+ one();
+ }
+ }
+
+ boolean check() {
+ if (++current == fence) {
+ trigger.run();
+ return false;
+ } else {
+ return true;
+ }
+ }
+ }
+
+ @Benchmark
+ public void testLoggingInferCaller(TestHandler handler, Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ localBH.consume(handler.testInferCaller("test"));
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ @Benchmark
+ public void testLoggingLogp(TestHandler handler, Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ localBH.consume(handler.testLogp("test"));
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+}
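
The two handler paths above differ only in whether the JDK has to infer the logging call site: Logger.info() walks the stack to fill in the source class and method for the LogRecord, while Logger.logp() takes them as arguments and skips that work. A small illustration of the two calls:

    import java.util.logging.Level;
    import java.util.logging.Logger;

    public class LogpExample {
        private static final Logger LOG = Logger.getLogger("LogpExample");

        public static void main(String[] args) {
            // Source class/method are inferred for the LogRecord.
            LOG.info("inferred caller");
            // Source class/method are supplied explicitly; no caller inference needed.
            LOG.logp(Level.INFO, "LogpExample", "main", "explicit caller");
        }
    }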
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/vm/lang/ThrowableRuntimeMicros.java Fri Oct 11 12:08:01 2019 +0530
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2015, 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.openjdk.bench.vm.lang;
+
+import java.util.concurrent.TimeUnit;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+@State(value = Scope.Benchmark)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class ThrowableRuntimeMicros {
+
+ // TestStack will add this number of calls to the call stack
+ @Param({"4", "100", "1000"})
+ // For more thorough testing, consider:
+ // @Param({"4", "10", "100", "256", "1000"})
+ public int depth;
+
+ /** Build a call stack of a given size, then run trigger code in it.
+ * (Does not account for existing frames higher up in the JMH machinery).
+ */
+ static class TestStack {
+ final long fence;
+ long current;
+ final Runnable trigger;
+
+ TestStack(long max, Runnable trigger) {
+ this.fence = max;
+ this.current = 0;
+ this.trigger = trigger;
+ }
+
+ public void start() {
+ one();
+ }
+
+ public void one() {
+ if (check()) {
+ two();
+ }
+ }
+
+ void two() {
+ if (check()) {
+ three();
+ }
+ }
+
+ private void three() {
+ if (check()) {
+ one();
+ }
+ }
+
+ boolean check() {
+ if (++current == fence) {
+ trigger.run();
+ return false;
+ } else {
+ return true;
+ }
+ }
+ }
+
+ @Benchmark
+ public void testThrowableInit(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ localBH.consume(new Throwable());
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ @Benchmark
+ public void testThrowableGetStackTrace(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ localBH.consume(new Throwable().getStackTrace());
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+
+ @Benchmark
+ public void testThrowableSTEtoString(Blackhole bh) {
+ final Blackhole localBH = bh;
+ final boolean[] done = {false};
+ new TestStack(depth, new Runnable() {
+ public void run() {
+ Throwable t = new Throwable();
+ for (StackTraceElement ste : t.getStackTrace()) {
+ localBH.consume(ste.toString());
+ }
+ done[0] = true;
+ }
+ }).start();
+ if (!done[0]) {
+ throw new RuntimeException();
+ }
+ }
+}
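
Much of what testThrowableInit measures on deep stacks is the stack capture done by fillInStackTrace() during construction; Throwable's protected four-argument constructor can opt out of that capture, as this sketch illustrates:

    public class CheapThrowable extends Throwable {
        CheapThrowable(String msg) {
            // writableStackTrace = false skips fillInStackTrace().
            super(msg, null, false, false);
        }

        public static void main(String[] args) {
            System.out.println(new CheapThrowable("no capture").getStackTrace().length); // 0
            System.out.println(new Throwable().getStackTrace().length > 0);              // true
        }
    }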