--- a/.hgtags Thu Oct 24 17:14:42 2019 -0400
+++ b/.hgtags Mon Oct 28 11:21:43 2019 -0400
@@ -592,3 +592,4 @@
5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18
9b67dd88a9313e982ec5f710a7747161bc8f0c23 jdk-14+19
+54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -142,8 +142,7 @@
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
- const size_t virtual_to_physical_ratio = 7; // 7:1
- const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
+ const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
}
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,23 +24,13 @@
#ifndef CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
#define CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
-//
-// Page Allocation Tiers
-// ---------------------
-//
-// Page Type Page Size Object Size Limit Object Alignment
-// ------------------------------------------------------------------
-// Small 2M <= 265K <MinObjAlignmentInBytes>
-// Medium 32M <= 4M 4K
-// Large X*M > 4M 2M
-// ------------------------------------------------------------------
-//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
+const size_t ZPlatformHeapViews = 3;
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
-uintptr_t ZPlatformAddressBase();
-size_t ZPlatformAddressOffsetBits();
-size_t ZPlatformAddressMetadataShift();
+uintptr_t ZPlatformAddressBase();
+size_t ZPlatformAddressOffsetBits();
+size_t ZPlatformAddressMetadataShift();
#endif // CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -774,7 +774,7 @@
bool is_oop = type == T_OBJECT || type == T_ARRAY;
LIR_Opr result = new_register(type);
value.load_item();
- assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+ assert(type == T_INT || is_oop || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
__ xchg(addr, value.result(), result, tmp);
return result;
@@ -783,7 +783,7 @@
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
value.load_item();
- assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
+ assert(type == T_INT || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
LIR_Opr tmp = new_register(type);
__ xadd(addr, value.result(), result, tmp);
return result;
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -592,10 +592,10 @@
__ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
if (pass_oop) {
__ mr(R5_ARG3, Rexception);
- __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
+ __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception));
} else {
__ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
- __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
+ __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception));
}
// Throw exception.
@@ -2105,7 +2105,7 @@
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
__ ld(R4_ARG2, 0, R18_locals);
- __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+ __ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
__ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
__ cmpdi(CCR0, R4_ARG2, 0);
__ beq(CCR0, L_done);
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -1072,8 +1072,7 @@
void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) {
if (UseHeavyMonitors) {
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
- monitor, /*check_for_exceptions=*/ true);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
return;
}
@@ -1147,8 +1146,7 @@
// The lock has been converted into a heavy lock and hence
// we need to get into the slow case.
z_stg(object, obj_entry); // Restore object entry, has been cleared above.
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
- monitor, /*check_for_exceptions=*/false);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
// }
@@ -2095,7 +2093,7 @@
Label jvmti_post_done;
MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
z_bre(jvmti_post_done);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry), /*check_exceptions=*/false);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
bind(jvmti_post_done);
}
}
@@ -2129,7 +2127,7 @@
MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
z_bre(jvmti_post_done);
if (!native_method) push(state); // see frame::interpreter_frame_result()
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), /*check_exceptions=*/false);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
if (!native_method) pop(state);
bind(jvmti_post_done);
}
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -247,54 +247,6 @@
__ bind(done);
}
-void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
- assert(ShenandoahCASBarrier, "should be enabled");
- Label is_null;
- __ testptr(dst, dst);
- __ jcc(Assembler::zero, is_null);
- resolve_forward_pointer_not_null(masm, dst, tmp);
- __ bind(is_null);
-}
-
-void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
- assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
- // The below loads the mark word, checks if the lowest two bits are
- // set, and if so, clear the lowest two bits and copy the result
- // to dst. Otherwise it leaves dst alone.
- // Implementing this is surprisingly awkward. I do it here by:
- // - Inverting the mark word
- // - Test lowest two bits == 0
- // - If so, set the lowest two bits
- // - Invert the result back, and copy to dst
-
- bool borrow_reg = (tmp == noreg);
- if (borrow_reg) {
- // No free registers available. Make one useful.
- tmp = LP64_ONLY(rscratch1) NOT_LP64(rdx);
- if (tmp == dst) {
- tmp = LP64_ONLY(rscratch2) NOT_LP64(rcx);
- }
- __ push(tmp);
- }
-
- assert_different_registers(dst, tmp);
-
- Label done;
- __ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
- __ notptr(tmp);
- __ testb(tmp, markWord::marked_value);
- __ jccb(Assembler::notZero, done);
- __ orptr(tmp, markWord::marked_value);
- __ notptr(tmp);
- __ mov(dst, tmp);
- __ bind(done);
-
- if (borrow_reg) {
- __ pop(tmp);
- }
-}
-
-
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
assert(ShenandoahLoadRefBarrier, "Should be enabled");
@@ -605,8 +557,9 @@
bool exchange, Register tmp1, Register tmp2) {
assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+ assert_different_registers(oldval, newval, tmp1, tmp2);
- Label retry, done;
+ Label L_success, L_failure;
// Remember oldval for retry logic below
#ifdef _LP64
@@ -618,8 +571,10 @@
__ movptr(tmp1, oldval);
}
- // Step 1. Try to CAS with given arguments. If successful, then we are done,
- // and can safely return.
+ // Step 1. Fast-path.
+ //
+ // Try to CAS with given arguments. If successful, then we are done.
+
if (os::is_MP()) __ lock();
#ifdef _LP64
if (UseCompressedOops) {
@@ -629,21 +584,32 @@
{
__ cmpxchgptr(newval, addr);
}
- __ jcc(Assembler::equal, done, true);
+ __ jcc(Assembler::equal, L_success);
// Step 2. CAS had failed. This may be a false negative.
//
// The trouble comes when we compare the to-space pointer with the from-space
- // pointer to the same object. To resolve this, it will suffice to resolve both
- // oldval and the value from memory -- this will give both to-space pointers.
+ // pointer to the same object. To resolve this, it will suffice to resolve
+ // the value from memory -- this will give both to-space pointers.
// If they mismatch, then it was a legitimate failure.
//
+ // Before reaching to resolve sequence, see if we can avoid the whole shebang
+ // with filters.
+
+ // Filter: when offending in-memory value is NULL, the failure is definitely legitimate
+ __ testptr(oldval, oldval);
+ __ jcc(Assembler::zero, L_failure);
+
+ // Filter: when heap is stable, the failure is definitely legitimate
#ifdef _LP64
- if (UseCompressedOops) {
- __ decode_heap_oop(tmp1);
- }
+ const Register thread = r15_thread;
+#else
+ const Register thread = tmp2;
+ __ get_thread(thread);
#endif
- resolve_forward_pointer(masm, tmp1);
+ Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
+ __ jcc(Assembler::zero, L_failure);
#ifdef _LP64
if (UseCompressedOops) {
@@ -654,18 +620,70 @@
{
__ movptr(tmp2, oldval);
}
- resolve_forward_pointer(masm, tmp2);
+
+ // Decode offending in-memory value.
+ // Test if-forwarded
+ __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
+ __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
+ __ jcc(Assembler::zero, L_failure); // When it is 00, then also not forwarded
+
+ // Load and mask forwarding pointer
+ __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
+ __ shrptr(tmp2, 2);
+ __ shlptr(tmp2, 2);
+#ifdef _LP64
+ if (UseCompressedOops) {
+ __ decode_heap_oop(tmp1); // decode for comparison
+ }
+#endif
+
+ // Now we have the forwarded offender in tmp2.
+ // Compare and if they don't match, we have legitimate failure
__ cmpptr(tmp1, tmp2);
- __ jcc(Assembler::notEqual, done, true);
+ __ jcc(Assembler::notEqual, L_failure);
+
+ // Step 3. Need to fix the memory ptr before continuing.
+ //
+ // At this point, we have from-space oldval in the register, and its to-space
+ // address is in tmp2. Let's try to update it into memory. We don't care if it
+ // succeeds or not. If it does, then the retrying CAS would see it and succeed.
+ // If this fixup fails, this means somebody else beat us to it, and necessarily
+ // with to-space ptr store. We still have to do the retry, because the GC might
+ // have updated the reference for us.
- // Step 3. Try to CAS again with resolved to-space pointers.
+#ifdef _LP64
+ if (UseCompressedOops) {
+ __ encode_heap_oop(tmp2); // previously decoded at step 2.
+ }
+#endif
+
+ if (os::is_MP()) __ lock();
+#ifdef _LP64
+ if (UseCompressedOops) {
+ __ cmpxchgl(tmp2, addr);
+ } else
+#endif
+ {
+ __ cmpxchgptr(tmp2, addr);
+ }
+
+ // Step 4. Try to CAS again.
//
- // Corner case: it may happen that somebody stored the from-space pointer
- // to memory while we were preparing for retry. Therefore, we can fail again
- // on retry, and so need to do this in loop, always resolving the failure
- // witness.
- __ bind(retry);
+ // This is guaranteed not to have false negatives, because oldval is definitely
+ // to-space, and memory pointer is to-space as well. Nothing is able to store
+ // from-space ptr into memory anymore. Make sure oldval is restored, after being
+ // garbled during retries.
+ //
+#ifdef _LP64
+ if (UseCompressedOops) {
+ __ movl(oldval, tmp2);
+ } else
+#endif
+ {
+ __ movptr(oldval, tmp2);
+ }
+
if (os::is_MP()) __ lock();
#ifdef _LP64
if (UseCompressedOops) {
@@ -675,41 +693,28 @@
{
__ cmpxchgptr(newval, addr);
}
- __ jcc(Assembler::equal, done, true);
+ if (!exchange) {
+ __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
+ }
-#ifdef _LP64
- if (UseCompressedOops) {
- __ movl(tmp2, oldval);
- __ decode_heap_oop(tmp2);
- } else
-#endif
- {
- __ movptr(tmp2, oldval);
- }
- resolve_forward_pointer(masm, tmp2);
-
- __ cmpptr(tmp1, tmp2);
- __ jcc(Assembler::equal, retry, true);
+ // Step 5. If we need a boolean result out of CAS, set the flag appropriately.
+ // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
+ // Otherwise, failure witness for CAE is in oldval on all paths, and we can return.
- // Step 4. If we need a boolean result out of CAS, check the flag again,
- // and promote the result. Note that we handle the flag from both the CAS
- // itself and from the retry loop.
- __ bind(done);
- if (!exchange) {
+ if (exchange) {
+ __ bind(L_failure);
+ __ bind(L_success);
+ } else {
assert(res != NULL, "need result register");
-#ifdef _LP64
- __ setb(Assembler::equal, res);
- __ movzbl(res, res);
-#else
- // Need something else to clean the result, because some registers
- // do not have byte encoding that movzbl wants. Cannot do the xor first,
- // because it modifies the flags.
- Label res_non_zero;
+
+ Label exit;
+ __ bind(L_failure);
+ __ xorptr(res, res);
+ __ jmpb(exit);
+
+ __ bind(L_success);
__ movptr(res, 1);
- __ jcc(Assembler::equal, res_non_zero, true);
- __ xorptr(res, res);
- __ bind(res_non_zero);
-#endif
+ __ bind(exit);
}
}
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -55,9 +55,6 @@
bool tosca_live,
bool expand_call);
- void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
- void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
-
void load_reference_barrier_not_null(MacroAssembler* masm, Register dst);
void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -142,8 +142,7 @@
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
- const size_t virtual_to_physical_ratio = 7; // 7:1
- const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
+ const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
}
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,23 +24,13 @@
#ifndef CPU_X86_GC_Z_ZGLOBALS_X86_HPP
#define CPU_X86_GC_Z_ZGLOBALS_X86_HPP
-//
-// Page Allocation Tiers
-// ---------------------
-//
-// Page Type Page Size Object Size Limit Object Alignment
-// ------------------------------------------------------------------
-// Small 2M <= 265K <MinObjAlignmentInBytes>
-// Medium 32M <= 4M 4K
-// Large X*M > 4M 2M
-// ------------------------------------------------------------------
-//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
+const size_t ZPlatformHeapViews = 3;
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
-uintptr_t ZPlatformAddressBase();
-size_t ZPlatformAddressOffsetBits();
-size_t ZPlatformAddressMetadataShift();
+uintptr_t ZPlatformAddressBase();
+size_t ZPlatformAddressOffsetBits();
+size_t ZPlatformAddressMetadataShift();
#endif // CPU_X86_GC_Z_ZGLOBALS_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/posix/gc/z/zInitialize_posix.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zInitialize.hpp"
+
+void ZInitialize::initialize_os() {
+ // Does nothing
+}
--- a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -29,6 +29,10 @@
#include <sys/mman.h>
#include <sys/types.h>
+void ZVirtualMemoryManager::initialize_os() {
+ // Does nothing
+}
+
static void unmap(uintptr_t start, size_t size) {
const int res = munmap((void*)start, size);
assert(res == 0, "Failed to unmap memory");
@@ -51,7 +55,7 @@
return true;
}
-bool ZVirtualMemoryManager::reserve_platform(uintptr_t start, size_t size) {
+bool ZVirtualMemoryManager::reserve_contiguous_platform(uintptr_t start, size_t size) {
// Reserve address views
const uintptr_t marked0 = ZAddress::marked0(start);
const uintptr_t marked1 = ZAddress::marked1(start);
--- a/src/hotspot/os/windows/os_windows.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/os/windows/os_windows.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -795,6 +795,10 @@
}
}
+uint os::processor_id() {
+ return (uint)GetCurrentProcessorNumber();
+}
+
void os::set_native_thread_name(const char *name) {
// See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -65,21 +65,22 @@
}
if (ret_frame.is_interpreted_frame()) {
- frame::ijava_state* istate = ret_frame.get_ijava_state();
- if (MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
- return false;
- }
- uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];
- uint64_t istate_bcp = istate->bcp;
- uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base());
- uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size());
- if (istate_bcp >= code_start && istate_bcp < code_end) {
- // we have a valid bcp, don't touch it, do nothing
- } else if (reg_bcp >= code_start && reg_bcp < code_end) {
- istate->bcp = reg_bcp;
+ frame::ijava_state *istate = ret_frame.get_ijava_state();
+ const Method *m = (const Method*)(istate->method);
+ if (!Method::is_valid_method(m)) return false;
+ if (!Metaspace::contains(m->constMethod())) return false;
+
+ uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];
+ uint64_t istate_bcp = istate->bcp;
+ uint64_t code_start = (uint64_t)(m->code_base());
+ uint64_t code_end = (uint64_t)(m->code_base() + m->code_size());
+ if (istate_bcp >= code_start && istate_bcp < code_end) {
+ // we have a valid bcp, don't touch it, do nothing
+ } else if (reg_bcp >= code_start && reg_bcp < code_end) {
+ istate->bcp = reg_bcp;
} else {
- return false;
- }
+ return false;
+ }
}
if (!ret_frame.safe_for_sender(this)) {
// nothing else to try if the frame isn't good
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -63,21 +63,24 @@
if (ret_frame.is_interpreted_frame()) {
frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
- if ((stack_base() >= (address)istate && (address)istate > stack_end()) ||
- MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
- return false;
- }
- uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];
- uint64_t istate_bcp = istate->bcp;
- uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base());
- uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size());
- if (istate_bcp >= code_start && istate_bcp < code_end) {
- // we have a valid bcp, don't touch it, do nothing
- } else if (reg_bcp >= code_start && reg_bcp < code_end) {
- istate->bcp = reg_bcp;
- } else {
- return false;
- }
+ if (stack_base() >= (address)istate && (address)istate > stack_end()) {
+ return false;
+ }
+ const Method *m = (const Method*)(istate->method);
+ if (!Method::is_valid_method(m)) return false;
+ if (!Metaspace::contains(m->constMethod())) return false;
+
+ uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];
+ uint64_t istate_bcp = istate->bcp;
+ uint64_t code_start = (uint64_t)(m->code_base());
+ uint64_t code_end = (uint64_t)(m->code_base() + m->code_size());
+ if (istate_bcp >= code_start && istate_bcp < code_end) {
+ // we have a valid bcp, don't touch it, do nothing
+ } else if (reg_bcp >= code_start && reg_bcp < code_end) {
+ istate->bcp = reg_bcp;
+ } else {
+ return false;
+ }
}
if (!ret_frame.safe_for_sender(this)) {
// nothing else to try if the frame isn't good
--- a/src/hotspot/share/classfile/classFileParser.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/classfile/classFileParser.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -2448,17 +2448,10 @@
parsed_code_attribute = true;
// Stack size, locals size, and code size
- if (_major_version == 45 && _minor_version <= 2) {
- cfs->guarantee_more(4, CHECK_NULL);
- max_stack = cfs->get_u1_fast();
- max_locals = cfs->get_u1_fast();
- code_length = cfs->get_u2_fast();
- } else {
- cfs->guarantee_more(8, CHECK_NULL);
- max_stack = cfs->get_u2_fast();
- max_locals = cfs->get_u2_fast();
- code_length = cfs->get_u4_fast();
- }
+ cfs->guarantee_more(8, CHECK_NULL);
+ max_stack = cfs->get_u2_fast();
+ max_locals = cfs->get_u2_fast();
+ code_length = cfs->get_u4_fast();
if (_need_verify) {
guarantee_property(args_size <= max_locals,
"Arguments can't fit into locals in class file %s",
@@ -2489,13 +2482,8 @@
unsigned int calculated_attribute_length = 0;
- if (_major_version > 45 || (_major_version == 45 && _minor_version > 2)) {
- calculated_attribute_length =
- sizeof(max_stack) + sizeof(max_locals) + sizeof(code_length);
- } else {
- // max_stack, locals and length are smaller in pre-version 45.2 classes
- calculated_attribute_length = sizeof(u1) + sizeof(u1) + sizeof(u2);
- }
+ calculated_attribute_length =
+ sizeof(max_stack) + sizeof(max_locals) + sizeof(code_length);
calculated_attribute_length +=
code_length +
sizeof(exception_table_length) +
--- a/src/hotspot/share/classfile/javaClasses.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/classfile/javaClasses.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -87,6 +87,21 @@
ALL_INJECTED_FIELDS(DECLARE_INJECTED_FIELD)
};
+// Register native methods of Object
+void java_lang_Object::register_natives(TRAPS) {
+ InstanceKlass* obj = SystemDictionary::Object_klass();
+ Method::register_native(obj, vmSymbols::hashCode_name(),
+ vmSymbols::void_int_signature(), (address) &JVM_IHashCode, CHECK);
+ Method::register_native(obj, vmSymbols::wait_name(),
+ vmSymbols::long_void_signature(), (address) &JVM_MonitorWait, CHECK);
+ Method::register_native(obj, vmSymbols::notify_name(),
+ vmSymbols::void_method_signature(), (address) &JVM_MonitorNotify, CHECK);
+ Method::register_native(obj, vmSymbols::notifyAll_name(),
+ vmSymbols::void_method_signature(), (address) &JVM_MonitorNotifyAll, CHECK);
+ Method::register_native(obj, vmSymbols::clone_name(),
+ vmSymbols::void_object_signature(), (address) &JVM_Clone, THREAD);
+}
+
int JavaClasses::compute_injected_offset(InjectedFieldID id) {
return _injected_fields[id].compute_offset();
}
--- a/src/hotspot/share/classfile/javaClasses.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/classfile/javaClasses.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -88,6 +88,13 @@
BASIC_JAVA_CLASSES_DO_PART1(f) \
BASIC_JAVA_CLASSES_DO_PART2(f)
+// Interface to java.lang.Object objects
+
+class java_lang_Object : AllStatic {
+ public:
+ static void register_natives(TRAPS);
+};
+
// Interface to java.lang.String objects
class java_lang_String : AllStatic {
--- a/src/hotspot/share/classfile/systemDictionary.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -1972,6 +1972,10 @@
resolve_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
}
+ assert(WK_KLASS(Object_klass) != NULL, "well-known classes should now be initialized");
+
+ java_lang_Object::register_natives(CHECK);
+
// Calculate offsets for String and Class classes since they are loaded and
// can be used after this point.
java_lang_String::compute_offsets();
@@ -2128,26 +2132,6 @@
{
MutexLocker mu1(SystemDictionary_lock, THREAD);
- // See whether biased locking is enabled and if so set it for this
- // klass.
- // Note that this must be done past the last potential blocking
- // point / safepoint. We might enable biased locking lazily using a
- // VM_Operation to iterate the SystemDictionary and installing the
- // biasable mark word into each InstanceKlass's prototype header.
- // To avoid race conditions where we accidentally miss enabling the
- // optimization for one class in the process of being added to the
- // dictionary, we must not safepoint after the test of
- // BiasedLocking::enabled().
- if (UseBiasedLocking && BiasedLocking::enabled()) {
- // Set biased locking bit for all loaded classes; it will be
- // cleared if revocation occurs too often for this type
- // NOTE that we must only do this when the class is initally
- // defined, not each time it is referenced from a new class loader
- if (k->class_loader() == class_loader()) {
- k->set_prototype_header(markWord::biased_locking_prototype());
- }
- }
-
// Make a new dictionary entry.
Dictionary* dictionary = loader_data->dictionary();
InstanceKlass* sd_check = find_class(d_hash, name, dictionary);
--- a/src/hotspot/share/classfile/systemDictionary.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/classfile/systemDictionary.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -219,7 +219,6 @@
\
/*end*/
-
class SystemDictionary : AllStatic {
friend class BootstrapInfo;
friend class VMStructs;
@@ -383,7 +382,6 @@
int limit = (int)end_id + 1;
resolve_wk_klasses_until((WKID) limit, start_id, THREAD);
}
-
public:
#define WK_KLASS_DECLARE(name, symbol) \
static InstanceKlass* name() { return check_klass(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } \
@@ -628,21 +626,6 @@
// Basic find on classes in the midst of being loaded
static Symbol* find_placeholder(Symbol* name, ClassLoaderData* loader_data);
- // Add a placeholder for a class being loaded
- static void add_placeholder(int index,
- Symbol* class_name,
- ClassLoaderData* loader_data);
- static void remove_placeholder(int index,
- Symbol* class_name,
- ClassLoaderData* loader_data);
-
- // Performs cleanups after resolve_super_or_fail. This typically needs
- // to be called on failure.
- // Won't throw, but can block.
- static void resolution_cleanups(Symbol* class_name,
- ClassLoaderData* loader_data,
- TRAPS);
-
// Resolve well-known classes so they can be used like SystemDictionary::String_klass()
static void resolve_well_known_classes(TRAPS);
--- a/src/hotspot/share/code/nmethod.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/code/nmethod.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -1826,57 +1826,183 @@
}
}
-#define NMETHOD_SENTINEL ((nmethod*)badAddress)
-
nmethod* volatile nmethod::_oops_do_mark_nmethods;
-// An nmethod is "marked" if its _mark_link is set non-null.
-// Even if it is the end of the linked list, it will have a non-null link value,
-// as long as it is on the list.
-// This code must be MP safe, because it is used from parallel GC passes.
-bool nmethod::test_set_oops_do_mark() {
- assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
- if (_oops_do_mark_link == NULL) {
- // Claim this nmethod for this thread to mark.
- if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
- // Atomically append this nmethod (now claimed) to the head of the list:
- nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
- for (;;) {
- nmethod* required_mark_nmethods = observed_mark_nmethods;
- _oops_do_mark_link = required_mark_nmethods;
- observed_mark_nmethods =
- Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
- if (observed_mark_nmethods == required_mark_nmethods)
- break;
- }
- // Mark was clear when we first saw this guy.
- LogTarget(Trace, gc, nmethod) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
- }
- return false;
+void nmethod::oops_do_log_change(const char* state) {
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, this, state, true /* short_form */);
+ }
+}
+
+bool nmethod::oops_do_try_claim() {
+ if (oops_do_try_claim_weak_request()) {
+ nmethod* result = oops_do_try_add_to_list_as_weak_done();
+ assert(result == NULL, "adding to global list as weak done must always succeed.");
+ return true;
+ }
+ return false;
+}
+
+bool nmethod::oops_do_try_claim_weak_request() {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+
+ if ((_oops_do_mark_link == NULL) &&
+ (Atomic::replace_if_null(mark_link(this, claim_weak_request_tag), &_oops_do_mark_link))) {
+ oops_do_log_change("oops_do, mark weak request");
+ return true;
+ }
+ return false;
+}
+
+void nmethod::oops_do_set_strong_done(nmethod* old_head) {
+ _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
+}
+
+nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+
+ oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_done_tag), &_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag));
+ if (old_next == NULL) {
+ oops_do_log_change("oops_do, mark strong done");
+ }
+ return old_next;
+}
+
+nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+ assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
+
+ oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_request_tag), &_oops_do_mark_link, next);
+ if (old_next == next) {
+ oops_do_log_change("oops_do, mark strong request");
+ }
+ return old_next;
+}
+
+bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+ assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
+
+ oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(extract_nmethod(next), claim_strong_done_tag), &_oops_do_mark_link, next);
+ if (old_next == next) {
+ oops_do_log_change("oops_do, mark weak done -> mark strong done");
+ return true;
+ }
+ return false;
+}
+
+nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+
+ assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
+ extract_state(_oops_do_mark_link) == claim_strong_request_tag,
+ "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
+
+ nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+ // Self-loop if needed.
+ if (old_head == NULL) {
+ old_head = this;
+ }
+ // Try to install end of list and weak done tag.
+ if (Atomic::cmpxchg(mark_link(old_head, claim_weak_done_tag), &_oops_do_mark_link, mark_link(this, claim_weak_request_tag)) == mark_link(this, claim_weak_request_tag)) {
+ oops_do_log_change("oops_do, mark weak done");
+ return NULL;
+ } else {
+ return old_head;
+ }
+}
+
+void nmethod::oops_do_add_to_list_as_strong_done() {
+ assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
+
+ nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+ // Self-loop if needed.
+ if (old_head == NULL) {
+ old_head = this;
+ }
+ assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
+ p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
+
+ oops_do_set_strong_done(old_head);
+}
+
+void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
+ if (!oops_do_try_claim_weak_request()) {
+ // Failed to claim for weak processing.
+ oops_do_log_change("oops_do, mark weak request fail");
+ return;
+ }
+
+ p->do_regular_processing(this);
+
+ nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
+ if (old_head == NULL) {
+ return;
+ }
+ oops_do_log_change("oops_do, mark weak done fail");
+ // Adding to global list failed, another thread added a strong request.
+ assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
+ "must be but is %u", extract_state(_oops_do_mark_link));
+
+ oops_do_log_change("oops_do, mark weak request -> mark strong done");
+
+ oops_do_set_strong_done(old_head);
+ // Do missing strong processing.
+ p->do_remaining_strong_processing(this);
+}
+
+void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
+ oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
+ if (next_raw == NULL) {
+ p->do_regular_processing(this);
+ oops_do_add_to_list_as_strong_done();
+ return;
+ }
+ // Claim failed. Figure out why and handle it.
+ if (oops_do_has_weak_request(next_raw)) {
+ oops_do_mark_link* old = next_raw;
+ // Claim failed because being weak processed (state == "weak request").
+ // Try to request deferred strong processing.
+ next_raw = oops_do_try_add_strong_request(old);
+ if (next_raw == old) {
+ // Successfully requested deferred strong processing.
+ return;
}
+ // Failed because of a concurrent transition. No longer in "weak request" state.
}
- // On fall through, another racing thread marked this nmethod before we did.
- return true;
+ if (oops_do_has_any_strong_state(next_raw)) {
+ // Already claimed for strong processing or requested for such.
+ return;
+ }
+ if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
+ // Successfully claimed "weak done" as "strong done". Do the missing marking.
+ p->do_remaining_strong_processing(this);
+ return;
+ }
+ // Claim failed, some other thread got it.
}
void nmethod::oops_do_marking_prologue() {
+ assert_at_safepoint();
+
log_trace(gc, nmethod)("oops_do_marking_prologue");
- assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
- // We use cmpxchg instead of regular assignment here because the user
- // may fork a bunch of threads, and we need them all to see the same state.
- nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
- guarantee(observed == NULL, "no races in this sequential code");
+ assert(_oops_do_mark_nmethods == NULL, "must be empty");
}
void nmethod::oops_do_marking_epilogue() {
- assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
- nmethod* cur = _oops_do_mark_nmethods;
- while (cur != NMETHOD_SENTINEL) {
- assert(cur != NULL, "not NULL-terminated");
- nmethod* next = cur->_oops_do_mark_link;
+ assert_at_safepoint();
+
+ nmethod* next = _oops_do_mark_nmethods;
+ _oops_do_mark_nmethods = NULL;
+ if (next == NULL) {
+ return;
+ }
+ nmethod* cur;
+ do {
+ cur = next;
+ next = extract_nmethod(cur->_oops_do_mark_link);
cur->_oops_do_mark_link = NULL;
DEBUG_ONLY(cur->verify_oop_relocations());
@@ -1885,11 +2011,8 @@
LogStream ls(lt);
CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
}
- cur = next;
- }
- nmethod* required = _oops_do_mark_nmethods;
- nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
- guarantee(observed == required, "no races in this sequential code");
+ // End if self-loop has been detected.
+ } while (cur != next);
log_trace(gc, nmethod)("oops_do_marking_epilogue");
}
@@ -2262,6 +2385,8 @@
assert(voc.ok(), "embedded oops must be OK");
Universe::heap()->verify_nmethod(this);
+ assert(_oops_do_mark_link == NULL, "_oops_do_mark_link for %s should be NULL but is " PTR_FORMAT,
+ nm->method()->external_name(), p2i(_oops_do_mark_link));
verify_scopes();
CompiledICLocker nm_verify(this);
--- a/src/hotspot/share/code/nmethod.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/code/nmethod.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -67,8 +67,8 @@
friend class NMethodSweeper;
friend class CodeCache; // scavengable oops
friend class JVMCINMethodData;
+
private:
-
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
@@ -76,8 +76,119 @@
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
+ // STW two-phase nmethod root processing helpers.
+ //
+ // When determining liveness of a given nmethod to do code cache unloading,
+  // some collectors need to do different things depending on whether the nmethods
+ // need to absolutely be kept alive during root processing; "strong"ly reachable
+ // nmethods are known to be kept alive at root processing, but the liveness of
+ // "weak"ly reachable ones is to be determined later.
+ //
+ // We want to allow strong and weak processing of nmethods by different threads
+ // at the same time without heavy synchronization. Additional constraints are
+ // to make sure that every nmethod is processed a minimal amount of time, and
+ // nmethods themselves are always iterated at most once at a particular time.
+ //
+ // Note that strong processing work must be a superset of weak processing work
+ // for this code to work.
+ //
+ // We store state and claim information in the _oops_do_mark_link member, using
+ // the two LSBs for the state and the remaining upper bits for linking together
+ // nmethods that were already visited.
+ // The last element is self-looped, i.e. points to itself to avoid some special
+ // "end-of-list" sentinel value.
+ //
+ // _oops_do_mark_link special values:
+ //
+ // _oops_do_mark_link == NULL: the nmethod has not been visited at all yet, i.e.
+ // is Unclaimed.
+ //
+ // For other values, its lowest two bits indicate the following states of the nmethod:
+ //
+ // weak_request (WR): the nmethod has been claimed by a thread for weak processing
+ // weak_done (WD): weak processing has been completed for this nmethod.
+ // strong_request (SR): the nmethod has been found to need strong processing while
+ // being weak processed.
+  //   strong_done (SD): strong processing has been completed for this nmethod.
+ //
+ // The following shows the _only_ possible progressions of the _oops_do_mark_link
+ // pointer.
+ //
+ // Given
+ // N as the nmethod
+ // X the current next value of _oops_do_mark_link
+ //
+ // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
+ // a single thread.
+ // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
+ // completed (as above) another thread found that the nmethod needs strong
+ // processing after all.
+ // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
+ // thread finds that the nmethod needs strong processing, marks it as such and
+ // terminates. The original thread completes strong processing.
+ // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
+ // the beginning by a single thread.
+ //
+  // "|" describes the concatenation of bits in _oops_do_mark_link.
+ //
+ // The diagram also describes the threads responsible for changing the nmethod to
+ // the next state by marking the _transition_ with (C) and (O), which mean "current"
+ // and "other" thread respectively.
+ //
+ struct oops_do_mark_link; // Opaque data type.
+
+ // States used for claiming nmethods during root processing.
+ static const uint claim_weak_request_tag = 0;
+ static const uint claim_weak_done_tag = 1;
+ static const uint claim_strong_request_tag = 2;
+ static const uint claim_strong_done_tag = 3;
+
+ static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
+ assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
+ assert(is_aligned(nm, 4), "nmethod pointer must have zero lower two LSB");
+ return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
+ }
+
+ static uint extract_state(oops_do_mark_link* link) {
+ return (uint)((uintptr_t)link & 0x3);
+ }
+
+ static nmethod* extract_nmethod(oops_do_mark_link* link) {
+ return (nmethod*)((uintptr_t)link & ~0x3);
+ }
+
+ void oops_do_log_change(const char* state);
+
+ static bool oops_do_has_weak_request(oops_do_mark_link* next) {
+ return extract_state(next) == claim_weak_request_tag;
+ }
+
+ static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
+ return extract_state(next) >= claim_strong_request_tag;
+ }
+
+ // Attempt Unclaimed -> N|WR transition. Returns true if successful.
+ bool oops_do_try_claim_weak_request();
+
+ // Attempt Unclaimed -> N|SD transition. Returns the current link.
+ oops_do_mark_link* oops_do_try_claim_strong_done();
+ // Attempt N|WR -> X|WD transition. Returns NULL if successful, X otherwise.
+ nmethod* oops_do_try_add_to_list_as_weak_done();
+
+ // Attempt X|WD -> N|SR transition. Returns the current link.
+ oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
+ // Attempt X|WD -> X|SD transition. Returns true if successful.
+ bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);
+
+ // Do the N|SD -> X|SD transition.
+ void oops_do_add_to_list_as_strong_done();
+
+ // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
+ // transitions).
+ void oops_do_set_strong_done(nmethod* old_head);
+
static nmethod* volatile _oops_do_mark_nmethods;
- nmethod* volatile _oops_do_mark_link;
+ oops_do_mark_link* volatile _oops_do_mark_link;
// offsets for entry points
address _entry_point; // entry point with class check
@@ -480,11 +591,30 @@
void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool allow_dead);
- bool test_set_oops_do_mark();
+ // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
+ // nmethod.
+ bool oops_do_try_claim();
+
+ // Class containing callbacks for the oops_do_process_weak/strong() methods
+ // below.
+ class OopsDoProcessor {
+ public:
+ // Process the oops of the given nmethod based on whether it has been called
+ // in a weak or strong processing context, i.e. apply either weak or strong
+ // work on it.
+ virtual void do_regular_processing(nmethod* nm) = 0;
+    // Assuming that the oops of the given nmethod have already had their weak
+    // processing applied, apply the remaining strong processing part.
+ virtual void do_remaining_strong_processing(nmethod* nm) = 0;
+ };
+
+ // The following two methods do the work corresponding to weak/strong nmethod
+ // processing.
+ void oops_do_process_weak(OopsDoProcessor* p);
+ void oops_do_process_strong(OopsDoProcessor* p);
+
static void oops_do_marking_prologue();
static void oops_do_marking_epilogue();
- static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
- bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
private:
ScopeDesc* scope_desc_in(address begin, address end);
--- a/src/hotspot/share/compiler/oopMap.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/compiler/oopMap.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -308,10 +308,13 @@
// handle derived pointers first (otherwise base pointer may be
// changed before derived pointer offset has been collected)
- OopMapValue omv;
{
- OopMapStream oms(map);
- if (!oms.is_done()) {
+ for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
+ OopMapValue omv = oms.current();
+ if (omv.type() != OopMapValue::derived_oop_value) {
+ continue;
+ }
+
#ifndef TIERED
COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
@@ -320,31 +323,25 @@
}
#endif
#endif // !TIERED
- do {
- omv = oms.current();
- if (omv.type() == OopMapValue::derived_oop_value) {
- oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
- guarantee(loc != NULL, "missing saved register");
- oop *derived_loc = loc;
- oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
- // Ignore NULL oops and decoded NULL narrow oops which
- // equal to CompressedOops::base() when a narrow oop
- // implicit null check is used in compiled code.
- // The narrow_oop_base could be NULL or be the address
- // of the page below heap depending on compressed oops mode.
- if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
- derived_oop_fn(base_loc, derived_loc);
- }
- }
- oms.next();
- } while (!oms.is_done());
+ oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
+ guarantee(loc != NULL, "missing saved register");
+ oop *derived_loc = loc;
+ oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
+ // Ignore NULL oops and decoded NULL narrow oops which
+ // equal to CompressedOops::base() when a narrow oop
+ // implicit null check is used in compiled code.
+ // The narrow_oop_base could be NULL or be the address
+ // of the page below heap depending on compressed oops mode.
+ if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
+ derived_oop_fn(base_loc, derived_loc);
+ }
}
}
{
// We want coop and oop oop_types
for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
- omv = oms.current();
+ OopMapValue omv = oms.current();
oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
// It should be an error if no location can be found for a
// register mentioned as contained an oop of some kind. Maybe
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -229,10 +229,6 @@
_rs_length_seq->add(rs_length);
}
-size_t G1Analytics::predict_rs_length_diff() const {
- return get_new_size_prediction(_rs_length_diff_seq);
-}
-
double G1Analytics::predict_alloc_rate_ms() const {
return get_new_prediction(_alloc_rate_ms_seq);
}
@@ -334,7 +330,7 @@
}
size_t G1Analytics::predict_rs_length() const {
- return get_new_size_prediction(_rs_length_seq);
+ return get_new_size_prediction(_rs_length_seq) + get_new_prediction(_rs_length_diff_seq);
}
size_t G1Analytics::predict_pending_cards() const {
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -115,8 +115,6 @@
void report_pending_cards(double pending_cards);
void report_rs_length(double rs_length);
- size_t predict_rs_length_diff() const;
-
double predict_alloc_rate_ms() const;
int num_alloc_rate_ms() const;
--- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -26,6 +26,7 @@
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "oops/access.inline.hpp"
@@ -52,14 +53,62 @@
do_oop_work(o);
}
-void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
- nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL) {
- if (!nm->test_set_oops_do_mark()) {
- _oc.set_nm(nm);
- nm->oops_do(&_oc);
- nm->fix_oop_relocations();
- }
+template<typename T>
+void G1CodeBlobClosure::MarkingOopClosure::do_oop_work(T* p) {
+ T oop_or_narrowoop = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(oop_or_narrowoop)) {
+ oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
+ _cm->mark_in_next_bitmap(_worker_id, o);
}
}
+G1CodeBlobClosure::MarkingOopClosure::MarkingOopClosure(uint worker_id) :
+ _cm(G1CollectedHeap::heap()->concurrent_mark()), _worker_id(worker_id) { }
+
+void G1CodeBlobClosure::MarkingOopClosure::do_oop(oop* o) {
+ do_oop_work(o);
+}
+
+void G1CodeBlobClosure::MarkingOopClosure::do_oop(narrowOop* o) {
+ do_oop_work(o);
+}
+
+void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) {
+ _oc.set_nm(nm);
+ nm->oops_do(&_oc);
+ nm->fix_oop_relocations();
+}
+
+void G1CodeBlobClosure::do_marking(nmethod* nm) {
+ nm->oops_do(&_marking_oc);
+}
+
+class G1NmethodProcessor : public nmethod::OopsDoProcessor {
+ G1CodeBlobClosure* _cl;
+
+public:
+ G1NmethodProcessor(G1CodeBlobClosure* cl) : _cl(cl) { }
+
+ void do_regular_processing(nmethod* nm) {
+ _cl->do_evacuation_and_fixup(nm);
+ }
+
+ void do_remaining_strong_processing(nmethod* nm) {
+ _cl->do_marking(nm);
+ }
+};
+
+void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm == NULL) {
+ return;
+ }
+
+ G1NmethodProcessor cl(this);
+
+ if (_strong) {
+ nm->oops_do_process_strong(&cl);
+ } else {
+ nm->oops_do_process_weak(&cl);
+ }
+}
--- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -28,9 +28,11 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/iterator.hpp"
+class G1ConcurrentMark;
class nmethod;
class G1CodeBlobClosure : public CodeBlobClosure {
+ // Gather nmethod remembered set entries.
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
@@ -50,9 +52,35 @@
}
};
+ // Mark all oops below TAMS.
+ class MarkingOopClosure : public OopClosure {
+ G1ConcurrentMark* _cm;
+ uint _worker_id;
+
+ template <typename T>
+ void do_oop_work(T* p);
+
+ public:
+ MarkingOopClosure(uint worker_id);
+
+ void do_oop(oop* o);
+ void do_oop(narrowOop* o);
+ };
+
HeapRegionGatheringOopClosure _oc;
+ MarkingOopClosure _marking_oc;
+
+ bool _strong;
+
+ void do_code_blob_weak(CodeBlob* cb);
+ void do_code_blob_strong(CodeBlob* cb);
+
public:
- G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+ G1CodeBlobClosure(uint worker_id, OopClosure* oc, bool strong) :
+ _oc(oc), _marking_oc(worker_id), _strong(strong) { }
+
+ void do_evacuation_and_fixup(nmethod* nm);
+ void do_marking(nmethod* nm);
void do_code_blob(CodeBlob* cb);
};
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -4098,7 +4098,7 @@
_cl.complete_work();
G1Policy* policy = G1CollectedHeap::heap()->policy();
- policy->record_max_rs_length(_rs_length);
+ policy->record_rs_length(_rs_length);
policy->cset_regions_freed();
}
public:
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -198,13 +198,17 @@
// Recursively traverse all live objects and mark them.
GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());
- // Do the actual marking.
- G1FullGCMarkTask marking_task(this);
- run_task(&marking_task);
+ {
+ // Do the actual marking.
+ G1FullGCMarkTask marking_task(this);
+ run_task(&marking_task);
+ }
- // Process references discovered during marking.
- G1FullGCReferenceProcessingExecutor reference_processing(this);
- reference_processing.execute(scope()->timer(), scope()->tracer());
+ {
+ // Process references discovered during marking.
+ G1FullGCReferenceProcessingExecutor reference_processing(this);
+ reference_processing.execute(scope()->timer(), scope()->tracer());
+ }
// Weak oops cleanup.
{
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -62,7 +62,6 @@
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
- _gc_par_phases[WaitForStrongRoots] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong Roots (ms):");
_gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");
@@ -165,7 +164,7 @@
void G1GCPhaseTimes::reset() {
_cur_collection_initial_evac_time_ms = 0.0;
- _cur_optional_evac_ms = 0.0;
+ _cur_optional_evac_time_ms = 0.0;
_cur_collection_code_root_fixup_time_ms = 0.0;
_cur_strong_code_root_purge_time_ms = 0.0;
_cur_merge_heap_roots_time_ms = 0.0;
@@ -417,14 +416,14 @@
}
double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
- const double sum_ms = _cur_optional_evac_ms + _cur_optional_merge_heap_roots_time_ms;
+ const double sum_ms = _cur_optional_evac_time_ms + _cur_optional_merge_heap_roots_time_ms;
if (sum_ms > 0) {
info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);
debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
debug_phase(_gc_par_phases[OptMergeRS]);
- info_time("Evacuate Optional Collection Set", _cur_optional_evac_ms);
+ info_time("Evacuate Optional Collection Set", _cur_optional_evac_time_ms);
debug_phase(_gc_par_phases[OptScanHR]);
debug_phase(_gc_par_phases[OptObjCopy]);
debug_phase(_gc_par_phases[OptCodeRoots]);
@@ -566,7 +565,6 @@
"JVMTIRoots",
AOT_ONLY("AOTCodeRoots" COMMA)
"CMRefRoots",
- "WaitForStrongRoots",
"MergeER",
"MergeRS",
"OptMergeRS",
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -57,7 +57,6 @@
JVMTIRoots,
AOT_ONLY(AOTCodeRoots COMMA)
CMRefRoots,
- WaitForStrongRoots,
MergeER,
MergeRS,
OptMergeRS,
@@ -83,7 +82,7 @@
};
static const GCParPhases ExtRootScanSubPhasesFirst = ThreadRoots;
- static const GCParPhases ExtRootScanSubPhasesLast = WaitForStrongRoots;
+ static const GCParPhases ExtRootScanSubPhasesLast = CMRefRoots;
enum GCMergeRSWorkTimes {
MergeRSMergedSparse,
@@ -157,7 +156,7 @@
WorkerDataArray<size_t>* _redirtied_cards;
double _cur_collection_initial_evac_time_ms;
- double _cur_optional_evac_ms;
+ double _cur_optional_evac_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_strong_code_root_purge_time_ms;
@@ -297,7 +296,7 @@
}
void record_or_add_optional_evac_time(double ms) {
- _cur_optional_evac_ms += ms;
+ _cur_optional_evac_time_ms += ms;
}
void record_or_add_code_root_fixup_time(double ms) {
@@ -416,7 +415,7 @@
}
double cur_collection_par_time_ms() {
- return _cur_collection_initial_evac_time_ms;
+ return _cur_collection_initial_evac_time_ms + _cur_optional_evac_time_ms;
}
double cur_clear_ct_time_ms() {
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -68,7 +68,7 @@
_reserve_regions(0),
_young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
_free_regions_at_end_of_collection(0),
- _max_rs_length(0),
+ _rs_length(0),
_rs_length_prediction(0),
_pending_cards_at_gc_start(0),
_pending_cards_at_prev_gc_end(0),
@@ -330,8 +330,7 @@
const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
const size_t pending_cards = _analytics->predict_pending_cards();
- const size_t adj_rs_length = rs_length + _analytics->predict_rs_length_diff();
- const size_t scanned_cards = _analytics->predict_card_num(adj_rs_length, true /* for_young_gc */);
+ const size_t scanned_cards = _analytics->predict_card_num(rs_length, true /* for_young_gc */);
const double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time;
@@ -753,13 +752,13 @@
_analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
}
- if (_max_rs_length > 0) {
+ if (_rs_length > 0) {
double cards_per_entry_ratio =
- (double) remset_cards_scanned / (double) _max_rs_length;
+ (double) remset_cards_scanned / (double) _rs_length;
_analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
}
- // This is defensive. For a while _max_rs_length could get
+ // This is defensive. For a while _rs_length could get
// smaller than _recorded_rs_length which was causing
// rs_length_diff to get very large and mess up the RSet length
// predictions. The reason was unsafe concurrent updates to the
@@ -774,8 +773,8 @@
// conditional below just in case.
size_t rs_length_diff = 0;
size_t recorded_rs_length = _collection_set->recorded_rs_length();
- if (_max_rs_length > recorded_rs_length) {
- rs_length_diff = _max_rs_length - recorded_rs_length;
+ if (_rs_length > recorded_rs_length) {
+ rs_length_diff = _rs_length - recorded_rs_length;
}
_analytics->report_rs_length_diff((double) rs_length_diff);
@@ -806,7 +805,7 @@
// During mixed gc we do not use them for young gen sizing.
if (this_pause_was_young_only) {
_analytics->report_pending_cards((double) _pending_cards_at_gc_start);
- _analytics->report_rs_length((double) _max_rs_length);
+ _analytics->report_rs_length((double) _rs_length);
}
}
@@ -951,7 +950,7 @@
}
double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
- size_t rs_length = _analytics->predict_rs_length() + _analytics->predict_rs_length_diff();
+ size_t rs_length = _analytics->predict_rs_length();
size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase());
return predict_base_elapsed_time_ms(pending_cards, card_num);
}
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -96,7 +96,7 @@
uint _free_regions_at_end_of_collection;
- size_t _max_rs_length;
+ size_t _rs_length;
size_t _rs_length_prediction;
@@ -136,8 +136,8 @@
hr->install_surv_rate_group(_survivor_surv_rate_group);
}
- void record_max_rs_length(size_t rs_length) {
- _max_rs_length = rs_length;
+ void record_rs_length(size_t rs_length) {
+ _rs_length = rs_length;
}
double predict_base_elapsed_time_ms(size_t pending_cards) const;
--- a/src/hotspot/share/gc/g1/g1RootClosures.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -45,8 +45,6 @@
CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; }
-
- bool trace_metadata() { return false; }
};
// Closures used during initial mark.
@@ -71,10 +69,6 @@
CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; }
-
- // If we are not marking all weak roots then we are tracing
- // which metadata is alive.
- bool trace_metadata() { return MarkWeak == G1MarkPromotedFromRoot; }
};
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
--- a/src/hotspot/share/gc/g1/g1RootClosures.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RootClosures.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -52,9 +52,6 @@
// Applied to code blobs treated as weak roots.
virtual CodeBlobClosure* weak_codeblobs() = 0;
- // Is this closure used for tracing metadata?
- virtual bool trace_metadata() = 0;
-
static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
};
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -45,34 +45,10 @@
#include "services/management.hpp"
#include "utilities/macros.hpp"
-void G1RootProcessor::worker_has_discovered_all_strong_nmethods() {
- assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
-
- uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
- if (new_value == n_workers()) {
- // This thread is last. Notify the others.
- MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
- _lock.notify_all();
- }
-}
-
-void G1RootProcessor::wait_until_all_strong_nmethods_discovered() {
- assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
-
- if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
- MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
- while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
- ml.wait(0);
- }
- }
-}
-
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
_g1h(g1h),
_process_strong_tasks(G1RP_PS_NumElements),
- _srs(n_workers),
- _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Mutex::_safepoint_check_never),
- _n_workers_discovered_strong_classes(0) {}
+ _srs(n_workers) {}
void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) {
G1GCPhaseTimes* phase_times = _g1h->phase_times();
@@ -80,7 +56,7 @@
G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_id);
G1EvacuationRootClosures* closures = pss->closures();
- process_java_roots(closures, phase_times, worker_id, closures->trace_metadata() /* notify_claimed_nmethods_done */);
+ process_java_roots(closures, phase_times, worker_id);
process_vm_roots(closures, phase_times, worker_id);
@@ -96,12 +72,6 @@
}
}
- if (closures->trace_metadata()) {
- G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_id);
- // Wait to make sure all workers passed the strong nmethods phase.
- wait_until_all_strong_nmethods_discovered();
- }
-
_process_strong_tasks.all_tasks_completed(n_workers());
}
@@ -171,8 +141,7 @@
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
- uint worker_id,
- bool notify_claimed_nmethods_done) {
+ uint worker_id) {
// We need to make make sure that the "strong" nmethods are processed first
// using the strong closure. Only after that we process the weakly reachable
// nmethods.
@@ -197,12 +166,6 @@
closures->strong_codeblobs());
}
- // This is the point where this worker thread will not find more strong nmethods.
- // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
- if (notify_claimed_nmethods_done) {
- worker_has_discovered_all_strong_nmethods();
- }
-
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_id);
if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -50,10 +50,6 @@
SubTasksDone _process_strong_tasks;
StrongRootsScope _srs;
- // Used to implement the Thread work barrier.
- Monitor _lock;
- volatile jint _n_workers_discovered_strong_classes;
-
enum G1H_process_roots_tasks {
G1RP_PS_Universe_oops_do,
G1RP_PS_JNIHandles_oops_do,
@@ -69,13 +65,9 @@
G1RP_PS_NumElements
};
- void worker_has_discovered_all_strong_nmethods();
- void wait_until_all_strong_nmethods_discovered();
-
void process_java_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
- uint worker_id,
- bool notify_claimed_nmethods_done = false);
+ uint worker_id);
void process_vm_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
--- a/src/hotspot/share/gc/g1/g1SharedClosures.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1SharedClosures.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -32,6 +32,11 @@
// Simple holder object for a complete set of closures used by the G1 evacuation code.
template <G1Mark Mark>
class G1SharedClosures {
+ static bool needs_strong_processing() {
+ // Request strong code root processing when G1MarkFromRoot is passed in during
+ // initial mark.
+ return Mark == G1MarkFromRoot;
+ }
public:
G1ParCopyClosure<G1BarrierNone, Mark> _oops;
G1ParCopyClosure<G1BarrierCLD, Mark> _oops_in_cld;
@@ -43,5 +48,5 @@
_oops(g1h, pss),
_oops_in_cld(g1h, pss),
_clds(&_oops_in_cld, process_only_dirty),
- _codeblobs(&_oops) {}
+ _codeblobs(pss->worker_id(), &_oops, needs_strong_processing()) {}
};
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -297,8 +297,15 @@
}
bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
- return call->is_CallLeaf() &&
- call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
+ if (!call->is_CallLeaf()) {
+ return false;
+ }
+
+ address entry_point = call->as_CallLeaf()->entry_point();
+ return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) ||
+ (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup)) ||
+ (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow)) ||
+ (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
}
bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -65,13 +65,6 @@
return true;
}
- if (available < threshold_bytes_allocated) {
- log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is lower than allocated recently (" SIZE_FORMAT "%s)",
- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
- byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated));
- return true;
- }
-
size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
if (bytes_allocated > threshold_bytes_allocated) {
log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddressSpaceLimit.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+
+static size_t address_space_limit() {
+ julong limit = 0;
+
+ if (os::has_allocatable_memory_limit(&limit)) {
+ return (size_t)limit;
+ }
+
+ // No limit
+ return SIZE_MAX;
+}
+
+size_t ZAddressSpaceLimit::mark_stack() {
+ // Allow mark stacks to occupy 10% of the address space
+ const size_t limit = address_space_limit() / 10;
+ return align_up(limit, ZMarkStackSpaceExpandSize);
+}
+
+size_t ZAddressSpaceLimit::heap_view() {
+ // Allow all heap views to occupy 50% of the address space
+ const size_t limit = address_space_limit() / 2 / ZHeapViews;
+ return align_up(limit, ZGranuleSize);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP
+#define SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP
+
+#include "memory/allocation.hpp"
+
+class ZAddressSpaceLimit : public AllStatic {
+public:
+ static size_t mark_stack();
+ static size_t heap_view();
+};
+
+#endif // SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP
--- a/src/hotspot/share/gc/z/zArguments.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zArguments.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zWorkers.hpp"
@@ -37,6 +38,15 @@
void ZArguments::initialize() {
GCArguments::initialize();
+ // Check mark stack size
+ const size_t mark_stack_space_limit = ZAddressSpaceLimit::mark_stack();
+ if (ZMarkStackSpaceLimit > mark_stack_space_limit) {
+ if (!FLAG_IS_DEFAULT(ZMarkStackSpaceLimit)) {
+ vm_exit_during_initialization("ZMarkStackSpaceLimit too large for limited address space");
+ }
+ FLAG_SET_DEFAULT(ZMarkStackSpaceLimit, mark_stack_space_limit);
+ }
+
// Enable NUMA by default
if (FLAG_IS_DEFAULT(UseNUMA)) {
FLAG_SET_DEFAULT(UseNUMA, true);
--- a/src/hotspot/share/gc/z/zForwardingTable.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zForwardingTable.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -22,27 +22,27 @@
*/
#include "precompiled.hpp"
-#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zForwardingTable.inline.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "utilities/debug.hpp"
ZForwardingTable::ZForwardingTable() :
- _map() {}
+ _map(ZAddressOffsetMax) {}
void ZForwardingTable::insert(ZForwarding* forwarding) {
- const uintptr_t addr = ZAddress::good(forwarding->start());
+ const uintptr_t offset = forwarding->start();
const size_t size = forwarding->size();
- assert(get(addr) == NULL, "Invalid entry");
- _map.put(addr, size, forwarding);
+ assert(_map.get(offset) == NULL, "Invalid entry");
+ _map.put(offset, size, forwarding);
}
void ZForwardingTable::remove(ZForwarding* forwarding) {
- const uintptr_t addr = ZAddress::good(forwarding->start());
+ const uintptr_t offset = forwarding->start();
const size_t size = forwarding->size();
- assert(get(addr) == forwarding, "Invalid entry");
- _map.put(addr, size, NULL);
+ assert(_map.get(offset) == forwarding, "Invalid entry");
+ _map.put(offset, size, NULL);
}
--- a/src/hotspot/share/gc/z/zForwardingTable.inline.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zForwardingTable.inline.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,11 +24,13 @@
#ifndef SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
#define SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
+#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zForwardingTable.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
inline ZForwarding* ZForwardingTable::get(uintptr_t addr) const {
- return _map.get(addr);
+ assert(!ZAddress::is_null(addr), "Invalid address");
+ return _map.get(ZAddress::offset(addr));
}
#endif // SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP
--- a/src/hotspot/share/gc/z/zFuture.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zFuture.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,10 @@
T _value;
public:
+ ZFuture();
+
void set(T value);
+ T peek();
T get();
};
--- a/src/hotspot/share/gc/z/zFuture.inline.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zFuture.inline.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -29,6 +29,10 @@
#include "runtime/thread.hpp"
template <typename T>
+inline ZFuture<T>::ZFuture() :
+ _value() {}
+
+template <typename T>
inline void ZFuture<T>::set(T value) {
// Set value
_value = value;
@@ -38,6 +42,11 @@
}
template <typename T>
+inline T ZFuture<T>::peek() {
+ return _value;
+}
+
+template <typename T>
inline T ZFuture<T>::get() {
// Wait for notification
Thread* const thread = Thread::current();
--- a/src/hotspot/share/gc/z/zGlobals.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zGlobals.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -44,6 +44,23 @@
const size_t ZGranuleSizeShift = ZPlatformGranuleSizeShift;
const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift;
+// Number of heap views
+const size_t ZHeapViews = ZPlatformHeapViews;
+
+// Virtual memory to physical memory ratio
+const size_t ZVirtualToPhysicalRatio = 16; // 16:1
+
+//
+// Page Tiers (assuming ZGranuleSize=2M)
+// -------------------------------------
+//
+// Page Size Object Size Object Alignment
+// --------------------------------------------------
+// Small        2M          <= 256K            MinObjAlignmentInBytes
+// Medium 32M <= 4M 4K
+// Large N x 2M > 4M 2M
+//
+
// Page types
const uint8_t ZPageTypeSmall = 0;
const uint8_t ZPageTypeMedium = 1;
@@ -113,6 +130,7 @@
// Cache line size
const size_t ZCacheLineSize = ZPlatformCacheLineSize;
+#define ZCACHE_ALIGNED ATTRIBUTE_ALIGNED(ZCacheLineSize)
// Mark stack space
extern uintptr_t ZMarkStackSpaceStart;
--- a/src/hotspot/share/gc/z/zGranuleMap.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zGranuleMap.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -38,15 +38,15 @@
const size_t _size;
T* const _map;
- size_t index_for_addr(uintptr_t addr) const;
+ size_t index_for_offset(uintptr_t offset) const;
public:
- ZGranuleMap();
+ ZGranuleMap(size_t max_offset);
~ZGranuleMap();
- T get(uintptr_t addr) const;
- void put(uintptr_t addr, T value);
- void put(uintptr_t addr, size_t size, T value);
+ T get(uintptr_t offset) const;
+ void put(uintptr_t offset, T value);
+ void put(uintptr_t offset, size_t size, T value);
};
template <typename T>
--- a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,15 +24,18 @@
#ifndef SHARE_GC_Z_ZGRANULEMAP_INLINE_HPP
#define SHARE_GC_Z_ZGRANULEMAP_INLINE_HPP
-#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.hpp"
#include "memory/allocation.inline.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
template <typename T>
-inline ZGranuleMap<T>::ZGranuleMap() :
- _size(ZAddressOffsetMax >> ZGranuleSizeShift),
- _map(MmapArrayAllocator<T>::allocate(_size, mtGC)) {}
+inline ZGranuleMap<T>::ZGranuleMap(size_t max_offset) :
+ _size(max_offset >> ZGranuleSizeShift),
+ _map(MmapArrayAllocator<T>::allocate(_size, mtGC)) {
+ assert(is_aligned(max_offset, ZGranuleSize), "Misaligned");
+}
template <typename T>
inline ZGranuleMap<T>::~ZGranuleMap() {
@@ -40,32 +43,30 @@
}
template <typename T>
-inline size_t ZGranuleMap<T>::index_for_addr(uintptr_t addr) const {
- assert(!ZAddress::is_null(addr), "Invalid address");
-
- const size_t index = ZAddress::offset(addr) >> ZGranuleSizeShift;
+inline size_t ZGranuleMap<T>::index_for_offset(uintptr_t offset) const {
+ const size_t index = offset >> ZGranuleSizeShift;
assert(index < _size, "Invalid index");
return index;
}
template <typename T>
-inline T ZGranuleMap<T>::get(uintptr_t addr) const {
- const size_t index = index_for_addr(addr);
+inline T ZGranuleMap<T>::get(uintptr_t offset) const {
+ const size_t index = index_for_offset(offset);
return _map[index];
}
template <typename T>
-inline void ZGranuleMap<T>::put(uintptr_t addr, T value) {
- const size_t index = index_for_addr(addr);
+inline void ZGranuleMap<T>::put(uintptr_t offset, T value) {
+ const size_t index = index_for_offset(offset);
_map[index] = value;
}
template <typename T>
-inline void ZGranuleMap<T>::put(uintptr_t addr, size_t size, T value) {
+inline void ZGranuleMap<T>::put(uintptr_t offset, size_t size, T value) {
assert(is_aligned(size, ZGranuleSize), "Misaligned");
- const size_t start_index = index_for_addr(addr);
+ const size_t start_index = index_for_offset(offset);
const size_t end_index = start_index + (size >> ZGranuleSizeShift);
for (size_t index = start_index; index < end_index; index++) {
_map[index] = value;
--- a/src/hotspot/share/gc/z/zHeap.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zHeap.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -244,34 +244,14 @@
return _page_allocator.uncommit(delay);
}
-void ZHeap::before_flip() {
- if (ZVerifyViews) {
- // Unmap all pages
- _page_allocator.debug_unmap_all_pages();
- }
-}
-
-void ZHeap::after_flip() {
- if (ZVerifyViews) {
- // Map all pages
- ZPageTableIterator iter(&_page_table);
- for (ZPage* page; iter.next(&page);) {
- _page_allocator.debug_map_page(page);
- }
- _page_allocator.debug_map_cached_pages();
- }
-}
-
void ZHeap::flip_to_marked() {
- before_flip();
+ ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_marked();
- after_flip();
}
void ZHeap::flip_to_remapped() {
- before_flip();
+ ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_remapped();
- after_flip();
}
void ZHeap::mark_start() {
@@ -460,6 +440,14 @@
iter.objects_do(cl, visit_weaks);
}
+void ZHeap::pages_do(ZPageClosure* cl) {
+ ZPageTableIterator iter(&_page_table);
+ for (ZPage* page; iter.next(&page);) {
+ cl->do_page(page);
+ }
+ _page_allocator.pages_do(cl);
+}
+
void ZHeap::serviceability_initialize() {
_serviceability.initialize();
}
--- a/src/hotspot/share/gc/z/zHeap.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zHeap.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -63,9 +63,6 @@
size_t heap_max_size() const;
size_t heap_max_reserve_size() const;
- void before_flip();
- void after_flip();
-
void flip_to_marked();
void flip_to_remapped();
@@ -151,6 +148,7 @@
// Iteration
void object_iterate(ObjectClosure* cl, bool visit_weaks);
+ void pages_do(ZPageClosure* cl);
// Serviceability
void serviceability_initialize();
--- a/src/hotspot/share/gc/z/zHeapIterator.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zHeapIterator.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
+#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
@@ -126,7 +127,7 @@
ZHeapIterator::ZHeapIterator() :
_visit_stack(),
- _visit_map() {}
+ _visit_map(ZAddressOffsetMax) {}
ZHeapIterator::~ZHeapIterator() {
ZVisitMapIterator iter(&_visit_map);
@@ -148,11 +149,11 @@
}
ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
- const uintptr_t addr = ZOop::to_address(obj);
- ZHeapIteratorBitMap* map = _visit_map.get(addr);
+ const uintptr_t offset = ZAddress::offset(ZOop::to_address(obj));
+ ZHeapIteratorBitMap* map = _visit_map.get(offset);
if (map == NULL) {
map = new ZHeapIteratorBitMap(object_index_max());
- _visit_map.put(addr, map);
+ _visit_map.put(offset, map);
}
return map;
--- a/src/hotspot/share/gc/z/zInitialize.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zInitialize.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -50,4 +50,6 @@
ZTracer::initialize();
ZLargePages::initialize();
ZBarrierSet::set_barrier_set(barrier_set);
+
+ initialize_os();
}
--- a/src/hotspot/share/gc/z/zInitialize.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zInitialize.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -29,6 +29,9 @@
class ZBarrierSet;
class ZInitialize {
+private:
+ void initialize_os();
+
public:
ZInitialize(ZBarrierSet* barrier_set);
};
--- a/src/hotspot/share/gc/z/zMarkStack.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -73,8 +73,8 @@
class ZMarkStripe {
private:
- ZMarkStackList _published ATTRIBUTE_ALIGNED(ZCacheLineSize);
- ZMarkStackList _overflowed ATTRIBUTE_ALIGNED(ZCacheLineSize);
+ ZCACHE_ALIGNED ZMarkStackList _published;
+ ZCACHE_ALIGNED ZMarkStackList _overflowed;
public:
ZMarkStripe();
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -50,8 +50,8 @@
class ZMarkStackAllocator {
private:
- ZMarkStackMagazineList _freelist ATTRIBUTE_ALIGNED(ZCacheLineSize);
- ZMarkStackSpace _space ATTRIBUTE_ALIGNED(ZCacheLineSize);
+ ZCACHE_ALIGNED ZMarkStackMagazineList _freelist;
+ ZCACHE_ALIGNED ZMarkStackSpace _space;
void prime_freelist();
ZMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size);
--- a/src/hotspot/share/gc/z/zMarkTerminate.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zMarkTerminate.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -30,9 +30,9 @@
class ZMarkTerminate {
private:
- uint _nworkers;
- volatile uint _nworking_stage0 ATTRIBUTE_ALIGNED(ZCacheLineSize);
- volatile uint _nworking_stage1;
+ uint _nworkers;
+ ZCACHE_ALIGNED volatile uint _nworking_stage0;
+ volatile uint _nworking_stage1;
bool enter_stage(volatile uint* nworking_stage);
void exit_stage(volatile uint* nworking_stage);
--- a/src/hotspot/share/gc/z/zMemory.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zMemory.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -26,6 +26,65 @@
#include "gc/z/zMemory.inline.hpp"
#include "memory/allocation.inline.hpp"
+ZMemory* ZMemoryManager::create(uintptr_t start, size_t size) {
+ ZMemory* const area = new ZMemory(start, size);
+ if (_callbacks._create != NULL) {
+ _callbacks._create(area);
+ }
+ return area;
+}
+
+void ZMemoryManager::destroy(ZMemory* area) {
+ if (_callbacks._destroy != NULL) {
+ _callbacks._destroy(area);
+ }
+ delete area;
+}
+
+void ZMemoryManager::shrink_from_front(ZMemory* area, size_t size) {
+ if (_callbacks._shrink_from_front != NULL) {
+ _callbacks._shrink_from_front(area, size);
+ }
+ area->shrink_from_front(size);
+}
+
+void ZMemoryManager::shrink_from_back(ZMemory* area, size_t size) {
+ if (_callbacks._shrink_from_back != NULL) {
+ _callbacks._shrink_from_back(area, size);
+ }
+ area->shrink_from_back(size);
+}
+
+void ZMemoryManager::grow_from_front(ZMemory* area, size_t size) {
+ if (_callbacks._grow_from_front != NULL) {
+ _callbacks._grow_from_front(area, size);
+ }
+ area->grow_from_front(size);
+}
+
+void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) {
+ if (_callbacks._grow_from_back != NULL) {
+ _callbacks._grow_from_back(area, size);
+ }
+ area->grow_from_back(size);
+}
+
+ZMemoryManager::Callbacks::Callbacks() :
+ _create(NULL),
+ _destroy(NULL),
+ _shrink_from_front(NULL),
+ _shrink_from_back(NULL),
+ _grow_from_front(NULL),
+ _grow_from_back(NULL) {}
+
+ZMemoryManager::ZMemoryManager() :
+ _freelist(),
+ _callbacks() {}
+
+void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
+ _callbacks = callbacks;
+}
+
uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
ZListIterator<ZMemory> iter(&_freelist);
for (ZMemory* area; iter.next(&area);) {
@@ -34,12 +93,12 @@
// Exact match, remove area
const uintptr_t start = area->start();
_freelist.remove(area);
- delete area;
+ destroy(area);
return start;
} else {
// Larger than requested, shrink area
const uintptr_t start = area->start();
- area->shrink_from_front(size);
+ shrink_from_front(area, size);
return start;
}
}
@@ -57,12 +116,12 @@
const uintptr_t start = area->start();
*allocated = area->size();
_freelist.remove(area);
- delete area;
+ destroy(area);
return start;
} else {
// Larger than requested, shrink area
const uintptr_t start = area->start();
- area->shrink_from_front(size);
+ shrink_from_front(area, size);
*allocated = size;
return start;
}
@@ -81,11 +140,11 @@
// Exact match, remove area
const uintptr_t start = area->start();
_freelist.remove(area);
- delete area;
+ destroy(area);
return start;
} else {
// Larger than requested, shrink area
- area->shrink_from_back(size);
+ shrink_from_back(area, size);
return area->end();
}
}
@@ -103,11 +162,11 @@
const uintptr_t start = area->start();
*allocated = area->size();
_freelist.remove(area);
- delete area;
+ destroy(area);
return start;
} else {
// Larger than requested, shrink area
- area->shrink_from_back(size);
+ shrink_from_back(area, size);
*allocated = size;
return area->end();
}
@@ -129,20 +188,20 @@
if (prev != NULL && start == prev->end()) {
if (end == area->start()) {
// Merge with prev and current area
- prev->grow_from_back(size + area->size());
+ grow_from_back(prev, size + area->size());
_freelist.remove(area);
delete area;
} else {
// Merge with prev area
- prev->grow_from_back(size);
+ grow_from_back(prev, size);
}
} else if (end == area->start()) {
// Merge with current area
- area->grow_from_front(size);
+ grow_from_front(area, size);
} else {
// Insert new area before current area
assert(end < area->start(), "Areas must not overlap");
- ZMemory* new_area = new ZMemory(start, size);
+ ZMemory* const new_area = create(start, size);
_freelist.insert_before(area, new_area);
}
@@ -155,10 +214,10 @@
ZMemory* const last = _freelist.last();
if (last != NULL && start == last->end()) {
// Merge with last area
- last->grow_from_back(size);
+ grow_from_back(last, size);
} else {
// Insert new area last
- ZMemory* new_area = new ZMemory(start, size);
+ ZMemory* const new_area = create(start, size);
_freelist.insert_last(new_area);
}
}
--- a/src/hotspot/share/gc/z/zMemory.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zMemory.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -49,14 +49,42 @@
};
class ZMemoryManager {
+public:
+ typedef void (*CreateDestroyCallback)(const ZMemory* area);
+ typedef void (*ResizeCallback)(const ZMemory* area, size_t size);
+
+ struct Callbacks {
+ CreateDestroyCallback _create;
+ CreateDestroyCallback _destroy;
+ ResizeCallback _shrink_from_front;
+ ResizeCallback _shrink_from_back;
+ ResizeCallback _grow_from_front;
+ ResizeCallback _grow_from_back;
+
+ Callbacks();
+ };
+
private:
ZList<ZMemory> _freelist;
+ Callbacks _callbacks;
+
+ ZMemory* create(uintptr_t start, size_t size);
+ void destroy(ZMemory* area);
+ void shrink_from_front(ZMemory* area, size_t size);
+ void shrink_from_back(ZMemory* area, size_t size);
+ void grow_from_front(ZMemory* area, size_t size);
+ void grow_from_back(ZMemory* area, size_t size);
public:
+ ZMemoryManager();
+
+ void register_callbacks(const Callbacks& callbacks);
+
uintptr_t alloc_from_front(size_t size);
uintptr_t alloc_from_front_at_most(size_t size, size_t* allocated);
uintptr_t alloc_from_back(size_t size);
uintptr_t alloc_from_back_at_most(size_t size, size_t* allocated);
+
void free(uintptr_t start, size_t size);
};
--- a/src/hotspot/share/gc/z/zNMethodTableIteration.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -31,9 +31,9 @@
class ZNMethodTableIteration {
private:
- ZNMethodTableEntry* _table;
- size_t _size;
- volatile size_t _claimed ATTRIBUTE_ALIGNED(ZCacheLineSize);
+ ZNMethodTableEntry* _table;
+ size_t _size;
+ ZCACHE_ALIGNED volatile size_t _claimed;
public:
ZNMethodTableIteration();
--- a/src/hotspot/share/gc/z/zPage.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPage.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -112,4 +112,9 @@
void print() const;
};
+class ZPageClosure {
+public:
+ virtual void do_page(const ZPage* page) = 0;
+};
+
#endif // SHARE_GC_Z_ZPAGE_HPP
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -60,7 +60,9 @@
_type(type),
_size(size),
_flags(flags),
- _total_collections(total_collections) {}
+ _total_collections(total_collections),
+ _node(),
+ _result() {}
uint8_t type() const {
return _type;
@@ -78,6 +80,10 @@
return _total_collections;
}
+ ZPage* peek() {
+ return _result.peek();
+ }
+
ZPage* wait() {
return _result.get();
}
@@ -108,6 +114,7 @@
_allocated(0),
_reclaimed(0),
_queue(),
+ _satisfied(),
_safe_delete(),
_uncommit(false),
_initialized(false) {
@@ -289,11 +296,7 @@
void ZPageAllocator::map_page(const ZPage* page) const {
// Map physical memory
- if (!page->is_mapped()) {
- _physical.map(page->physical_memory(), page->start());
- } else if (ZVerifyViews) {
- _physical.debug_map(page->physical_memory(), page->start());
- }
+ _physical.map(page->physical_memory(), page->start());
}
size_t ZPageAllocator::max_available(bool no_reserve) const {
@@ -433,14 +436,21 @@
} while (page == gc_marker);
{
- // Guard deletion of underlying semaphore. This is a workaround for a
- // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
+ //
+ // We grab the lock here for two different reasons:
+ //
+ // 1) Guard deletion of underlying semaphore. This is a workaround for
+ // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
// the semaphore immediately after returning from sem_wait(). The
// reason is that sem_post() can touch the semaphore after a waiting
// thread have returned from sem_wait(). To avoid this race we are
// forcing the waiting thread to acquire/release the lock held by the
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+ //
+ // 2) Guard the list of satisfied pages.
+ //
ZLocker<ZLock> locker(&_lock);
+ _satisfied.remove(&request);
}
}
@@ -462,7 +472,9 @@
}
// Map page if needed
- map_page(page);
+ if (!page->is_mapped()) {
+ map_page(page);
+ }
// Reset page. This updates the page's sequence number and must
// be done after page allocation, which potentially blocked in
@@ -500,6 +512,7 @@
// the dequeue operation must happen first, since the request
// will immediately be deallocated once it has been satisfied.
_queue.remove(request);
+ _satisfied.insert_first(request);
request->satisfy(page);
}
}
@@ -686,28 +699,21 @@
_physical.debug_map(page->physical_memory(), page->start());
}
-class ZPageCacheDebugMapClosure : public StackObj {
-private:
- const ZPageAllocator* const _allocator;
-
-public:
- ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
- _allocator(allocator) {}
-
- virtual void do_page(const ZPage* page) {
- _allocator->debug_map_page(page);
- }
-};
-
-void ZPageAllocator::debug_map_cached_pages() const {
+void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- ZPageCacheDebugMapClosure cl(this);
- _cache.pages_do(&cl);
+ _physical.debug_unmap(page->physical_memory(), page->start());
}
-void ZPageAllocator::debug_unmap_all_pages() const {
- assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
+void ZPageAllocator::pages_do(ZPageClosure* cl) const {
+ ZListIterator<ZPageAllocRequest> iter(&_satisfied);
+ for (ZPageAllocRequest* request; iter.next(&request);) {
+ const ZPage* const page = request->peek();
+ if (page != NULL) {
+ cl->do_page(page);
+ }
+ }
+
+ _cache.pages_do(cl);
}
bool ZPageAllocator::is_alloc_stalled() const {
@@ -728,7 +734,8 @@
}
// Out of memory, fail allocation request
- _queue.remove_first();
+ _queue.remove(request);
+ _satisfied.insert_first(request);
request->satisfy(NULL);
}
}
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -54,11 +54,12 @@
size_t _allocated;
ssize_t _reclaimed;
ZList<ZPageAllocRequest> _queue;
+ ZList<ZPageAllocRequest> _satisfied;
mutable ZSafeDelete<ZPage> _safe_delete;
bool _uncommit;
bool _initialized;
- static ZPage* const gc_marker;
+ static ZPage* const gc_marker;
void prime_cache(size_t size);
@@ -117,11 +118,12 @@
void map_page(const ZPage* page) const;
void debug_map_page(const ZPage* page) const;
- void debug_map_cached_pages() const;
- void debug_unmap_all_pages() const;
+ void debug_unmap_page(const ZPage* page) const;
bool is_alloc_stalled() const;
void check_out_of_memory();
+
+ void pages_do(ZPageClosure* cl) const;
};
#endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
--- a/src/hotspot/share/gc/z/zPageCache.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageCache.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -240,3 +240,26 @@
flush_list(cl, &_medium, to);
flush_per_numa_lists(cl, &_small, to);
}
+
+void ZPageCache::pages_do(ZPageClosure* cl) const {
+ // Small
+ ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
+ for (const ZList<ZPage>* list; iter_numa.next(&list);) {
+ ZListIterator<ZPage> iter_small(list);
+ for (ZPage* page; iter_small.next(&page);) {
+ cl->do_page(page);
+ }
+ }
+
+ // Medium
+ ZListIterator<ZPage> iter_medium(&_medium);
+ for (ZPage* page; iter_medium.next(&page);) {
+ cl->do_page(page);
+ }
+
+ // Large
+ ZListIterator<ZPage> iter_large(&_large);
+ for (ZPage* page; iter_large.next(&page);) {
+ cl->do_page(page);
+ }
+}
--- a/src/hotspot/share/gc/z/zPageCache.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageCache.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -71,7 +71,7 @@
void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
- template <typename Closure> void pages_do(Closure* cl) const;
+ void pages_do(ZPageClosure* cl) const;
};
#endif // SHARE_GC_Z_ZPAGECACHE_HPP
--- a/src/hotspot/share/gc/z/zPageCache.inline.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageCache.inline.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -32,28 +32,4 @@
return _available;
}
-template <typename Closure>
-inline void ZPageCache::pages_do(Closure* cl) const {
- // Small
- ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
- for (const ZList<ZPage>* list; iter_numa.next(&list);) {
- ZListIterator<ZPage> iter_small(list);
- for (ZPage* page; iter_small.next(&page);) {
- cl->do_page(page);
- }
- }
-
- // Medium
- ZListIterator<ZPage> iter_medium(&_medium);
- for (ZPage* page; iter_medium.next(&page);) {
- cl->do_page(page);
- }
-
- // Large
- ZListIterator<ZPage> iter_large(&_large);
- for (ZPage* page; iter_large.next(&page);) {
- cl->do_page(page);
- }
-}
-
#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
--- a/src/hotspot/share/gc/z/zPageTable.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageTable.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -22,7 +22,7 @@
*/
#include "precompiled.hpp"
-#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
@@ -30,24 +30,24 @@
#include "utilities/debug.hpp"
ZPageTable::ZPageTable() :
- _map() {}
+ _map(ZAddressOffsetMax) {}
void ZPageTable::insert(ZPage* page) {
- const uintptr_t addr = ZAddress::good(page->start());
+ const uintptr_t offset = page->start();
const size_t size = page->size();
// Make sure a newly created page is
// visible before updating the page table.
OrderAccess::storestore();
- assert(get(addr) == NULL, "Invalid entry");
- _map.put(addr, size, page);
+ assert(_map.get(offset) == NULL, "Invalid entry");
+ _map.put(offset, size, page);
}
void ZPageTable::remove(ZPage* page) {
- const uintptr_t addr = ZAddress::good(page->start());
+ const uintptr_t offset = page->start();
const size_t size = page->size();
- assert(get(addr) == page, "Invalid entry");
- _map.put(addr, size, NULL);
+ assert(_map.get(offset) == page, "Invalid entry");
+ _map.put(offset, size, NULL);
}
--- a/src/hotspot/share/gc/z/zPageTable.inline.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zPageTable.inline.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -29,7 +29,8 @@
#include "gc/z/zPageTable.hpp"
inline ZPage* ZPageTable::get(uintptr_t addr) const {
- return _map.get(addr);
+ assert(!ZAddress::is_null(addr), "Invalid address");
+ return _map.get(ZAddress::offset(addr));
}
inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* page_table) :
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -149,7 +149,7 @@
virtual void do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
- if (nm != NULL && !nm->test_set_oops_do_mark()) {
+ if (nm != NULL && nm->oops_do_try_claim()) {
CodeBlobToOopClosure::do_code_blob(cb);
_bs->disarm(nm);
}
--- a/src/hotspot/share/gc/z/zVerify.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zVerify.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -26,6 +26,7 @@
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.hpp"
+#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
@@ -170,3 +171,38 @@
ZStatTimerDisable disable;
roots_and_objects(true /* verify_weaks */);
}
+
+template <bool Map>
+class ZPageDebugMapOrUnmapClosure : public ZPageClosure {
+private:
+ const ZPageAllocator* const _allocator;
+
+public:
+ ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) :
+ _allocator(allocator) {}
+
+ void do_page(const ZPage* page) {
+ if (Map) {
+ _allocator->debug_map_page(page);
+ } else {
+ _allocator->debug_unmap_page(page);
+ }
+ }
+};
+
+ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) :
+ _allocator(allocator) {
+ if (ZVerifyViews) {
+ // Unmap all pages
+ ZPageDebugMapOrUnmapClosure<false /* Map */> cl(_allocator);
+ ZHeap::heap()->pages_do(&cl);
+ }
+}
+
+ZVerifyViewsFlip::~ZVerifyViewsFlip() {
+ if (ZVerifyViews) {
+ // Map all pages
+ ZPageDebugMapOrUnmapClosure<true /* Map */> cl(_allocator);
+ ZHeap::heap()->pages_do(&cl);
+ }
+}
--- a/src/hotspot/share/gc/z/zVerify.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zVerify.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -26,6 +26,8 @@
#include "memory/allocation.hpp"
+class ZPageAllocator;
+
class ZVerify : public AllStatic {
private:
template <typename RootsIterator> static void roots();
@@ -45,4 +47,13 @@
static void after_weak_processing();
};
+class ZVerifyViewsFlip {
+private:
+ const ZPageAllocator* const _allocator;
+
+public:
+ ZVerifyViewsFlip(const ZPageAllocator* allocator);
+ ~ZVerifyViewsFlip();
+};
+
#endif // SHARE_GC_Z_ZVERIFY_HPP
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -22,10 +22,13 @@
*/
#include "precompiled.hpp"
+#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "logging/log.hpp"
#include "services/memTracker.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/align.hpp"
ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
_manager(),
@@ -38,33 +41,105 @@
return;
}
- log_info(gc, init)("Address Space: " SIZE_FORMAT "T", ZAddressOffsetMax / K / G);
-
// Reserve address space
- if (reserve(0, ZAddressOffsetMax) < max_capacity) {
- log_error(gc)("Failed to reserve address space for Java heap");
+ if (!reserve(max_capacity)) {
+ log_error(gc)("Failed to reserve enough address space for Java heap");
return;
}
+ // Initialize OS specific parts
+ initialize_os();
+
// Successfully initialized
_initialized = true;
}
-size_t ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
- if (size < ZGranuleSize) {
+size_t ZVirtualMemoryManager::reserve_discontiguous(uintptr_t start, size_t size, size_t min_range) {
+ if (size < min_range) {
+ // Too small
+ return 0;
+ }
+
+ assert(is_aligned(size, ZGranuleSize), "Misaligned");
+
+ if (reserve_contiguous_platform(start, size)) {
+ // Make the address range free
+ _manager.free(start, size);
+ return size;
+ }
+
+ const size_t half = size / 2;
+ if (half < min_range) {
// Too small
return 0;
}
- if (!reserve_platform(start, size)) {
- const size_t half = size / 2;
- return reserve(start, half) + reserve(start + half, half);
+ // Divide and conquer
+ const size_t first_part = align_down(half, ZGranuleSize);
+ const size_t second_part = size - first_part;
+ return reserve_discontiguous(start, first_part, min_range) +
+ reserve_discontiguous(start + first_part, second_part, min_range);
+}
+
+size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
+ // Don't try to reserve address ranges smaller than 1% of the requested size.
+ // This avoids an explosion of reservation attempts in case large parts of the
 + // address space are already occupied.
+ const size_t min_range = align_up(size / 100, ZGranuleSize);
+ size_t start = 0;
+ size_t reserved = 0;
+
+ // Reserve size somewhere between [0, ZAddressOffsetMax)
+ while (reserved < size && start < ZAddressOffsetMax) {
+ const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
+ reserved += reserve_discontiguous(start, remaining, min_range);
+ start += remaining;
}
- // Make the address range free
- _manager.free(start, size);
+ return reserved;
+}
+
+bool ZVirtualMemoryManager::reserve_contiguous(size_t size) {
+ // Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax)
+ const size_t end = ZAddressOffsetMax - size;
+ const size_t increment = align_up(end / 8192, ZGranuleSize);
+
+ for (size_t start = 0; start <= end; start += increment) {
+ if (reserve_contiguous_platform(start, size)) {
+ // Make the address range free
+ _manager.free(start, size);
+
+ // Success
+ return true;
+ }
+ }
+
+ // Failed
+ return false;
+}
- return size;
+bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
+ const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap_view());
+ const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit);
+
+ size_t reserved = size;
+ bool contiguous = true;
+
+ // Prefer a contiguous address space
+ if (!reserve_contiguous(size)) {
+ // Fall back to a discontiguous address space
+ reserved = reserve_discontiguous(size);
+ contiguous = false;
+ }
+
+ log_info(gc, init)("Address Space Type: %s/%s/%s",
+ (contiguous ? "Contiguous" : "Discontiguous"),
+ (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"),
+ (reserved == size ? "Complete" : "Degraded"));
+ log_info(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
+ reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);
+
+ return reserved >= max_capacity;
}
void ZVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
--- a/src/hotspot/share/gc/z/zVirtualMemory.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -50,8 +50,14 @@
ZMemoryManager _manager;
bool _initialized;
- bool reserve_platform(uintptr_t start, size_t size);
- size_t reserve(uintptr_t start, size_t size);
+ void initialize_os();
+
+ bool reserve_contiguous_platform(uintptr_t start, size_t size);
+ bool reserve_contiguous(size_t size);
+ size_t reserve_discontiguous(uintptr_t start, size_t size, size_t min_range);
+ size_t reserve_discontiguous(size_t size);
+ bool reserve(size_t max_capacity);
+
void nmt_reserve(uintptr_t start, size_t size);
public:
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -24,15 +24,18 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
+#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/recorder/jfrRecorder.hpp"
+#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
-#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/utilities/jfrBigEndian.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "logging/log.hpp"
@@ -81,7 +84,7 @@
if (_lock != NULL) {
delete _lock;
}
- JfrTypeManager::clear();
+ JfrTypeManager::destroy();
}
static const size_t unlimited_mspace_size = 0;
@@ -332,6 +335,7 @@
typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
size_t JfrCheckpointManager::clear() {
+ JfrTypeSet::clear();
DiscardOperation discarder(mutexed); // mutexed discard mode
process_free_list(discarder, _free_list_mspace);
process_free_list(discarder, _epoch_transition_mspace);
@@ -353,12 +357,34 @@
}
void JfrCheckpointManager::write_type_set() {
- JfrTypeManager::write_type_set();
+ assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+ // can safepoint here
+ MutexLocker cld_lock(ClassLoaderDataGraph_lock);
+ MutexLocker module_lock(Module_lock);
+ if (!LeakProfiler::is_running()) {
+ JfrCheckpointWriter writer(true, true, Thread::current());
+ JfrTypeSet::serialize(&writer, NULL, false);
+ return;
+ }
+ Thread* const t = Thread::current();
+ JfrCheckpointWriter leakp_writer(false, true, t);
+ JfrCheckpointWriter writer(false, true, t);
+ JfrTypeSet::serialize(&writer, &leakp_writer, false);
+ ObjectSampleCheckpoint::on_type_set(leakp_writer);
}
void JfrCheckpointManager::write_type_set_for_unloaded_classes() {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- JfrTypeManager::write_type_set_for_unloaded_classes();
+ JfrCheckpointWriter writer(false, true, Thread::current());
+ const JfrCheckpointContext ctx = writer.context();
+ JfrTypeSet::serialize(&writer, NULL, true);
+ if (LeakProfiler::is_running()) {
+ ObjectSampleCheckpoint::on_type_set_unload(writer);
+ }
+ if (!JfrRecorder::is_recording()) {
+ // discard by rewind
+ writer.set_context(ctx);
+ }
}
void JfrCheckpointManager::create_thread_blob(JavaThread* jt) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -36,7 +36,6 @@
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
-#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
@@ -271,30 +270,6 @@
}
}
-class TypeSetSerialization {
- private:
- JfrCheckpointWriter* _leakp_writer;
- bool _class_unload;
- public:
- TypeSetSerialization(bool class_unload, JfrCheckpointWriter* leakp_writer = NULL) :
- _leakp_writer(leakp_writer), _class_unload(class_unload) {}
- void write(JfrCheckpointWriter& writer) {
- JfrTypeSet::serialize(&writer, _leakp_writer, _class_unload);
- }
-};
-
-void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
- TypeSetSerialization type_set(true);
- type_set.write(writer);
-};
-
-TypeSet::TypeSet(JfrCheckpointWriter* leakp_writer) : _leakp_writer(leakp_writer) {}
-
-void TypeSet::serialize(JfrCheckpointWriter& writer) {
- TypeSetSerialization type_set(false, _leakp_writer);
- type_set.write(writer);
-};
-
void ThreadStateConstant::serialize(JfrCheckpointWriter& writer) {
JfrThreadState::serialize(writer);
}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -37,11 +37,6 @@
void serialize(JfrCheckpointWriter& writer);
};
-class ClassUnloadTypeSet : public JfrSerializer {
- public:
- void serialize(JfrCheckpointWriter& writer);
-};
-
class FlagValueOriginConstant : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer);
@@ -107,14 +102,6 @@
void serialize(JfrCheckpointWriter& writer);
};
-class TypeSet : public JfrSerializer {
- private:
- JfrCheckpointWriter* _leakp_writer;
- public:
- explicit TypeSet(JfrCheckpointWriter* leakp_writer = NULL);
- void serialize(JfrCheckpointWriter& writer);
-};
-
class ThreadStateConstant : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -23,9 +23,6 @@
*/
#include "precompiled.hpp"
-#include "jfr/jfr.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrType.hpp"
@@ -35,9 +32,9 @@
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
+#include "runtime/semaphore.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/exceptions.hpp"
-#include "runtime/semaphore.hpp"
class JfrSerializerRegistration : public JfrCHeapObj {
private:
@@ -120,7 +117,7 @@
static List types;
static List safepoint_types;
-void JfrTypeManager::clear() {
+void JfrTypeManager::destroy() {
SerializerRegistrationGuard guard;
Iterator iter(types);
JfrSerializerRegistration* registration;
@@ -152,39 +149,6 @@
}
}
-void JfrTypeManager::write_type_set() {
- assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
- // can safepoint here
- MutexLocker cld_lock(ClassLoaderDataGraph_lock);
- MutexLocker module_lock(Module_lock);
- if (!LeakProfiler::is_running()) {
- JfrCheckpointWriter writer(true, true, Thread::current());
- TypeSet set;
- set.serialize(writer);
- return;
- }
- JfrCheckpointWriter leakp_writer(false, true, Thread::current());
- JfrCheckpointWriter writer(false, true, Thread::current());
- TypeSet set(&leakp_writer);
- set.serialize(writer);
- ObjectSampleCheckpoint::on_type_set(leakp_writer);
-}
-
-void JfrTypeManager::write_type_set_for_unloaded_classes() {
- assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- JfrCheckpointWriter writer(false, true, Thread::current());
- const JfrCheckpointContext ctx = writer.context();
- ClassUnloadTypeSet class_unload_set;
- class_unload_set.serialize(writer);
- if (LeakProfiler::is_running()) {
- ObjectSampleCheckpoint::on_type_set_unload(writer);
- }
- if (!Jfr::is_recording()) {
- // discard anything written
- writer.set_context(ctx);
- }
-}
-
void JfrTypeManager::create_thread_blob(JavaThread* jt) {
assert(jt != NULL, "invariant");
ResourceMark rm(jt);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -32,11 +32,9 @@
class JfrTypeManager : public AllStatic {
public:
static bool initialize();
- static void clear();
+ static void destroy();
static void write_types(JfrCheckpointWriter& writer);
static void write_safepoint_types(JfrCheckpointWriter& writer);
- static void write_type_set();
- static void write_type_set_for_unloaded_classes();
static void create_thread_blob(JavaThread* jt);
static void write_thread_checkpoint(JavaThread* jt);
};
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -204,9 +204,14 @@
return write_klass(writer, klass, true);
}
+static bool is_implied(const Klass* klass) {
+ assert(klass != NULL, "invariant");
+ return klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass();
+}
+
static void do_implied(Klass* klass) {
assert(klass != NULL, "invariant");
- if (klass->is_subclass_of(SystemDictionary::ClassLoader_klass()) || klass == SystemDictionary::Object_klass()) {
+ if (is_implied(klass)) {
if (_leakp_writer != NULL) {
SET_LEAKP(klass);
}
@@ -259,6 +264,16 @@
typedef CompositeFunctor<KlassPtr, KlassWriter, KlassArtifactRegistrator> KlassWriterRegistration;
typedef JfrArtifactCallbackHost<KlassPtr, KlassWriterRegistration> KlassCallback;
+template <>
+class LeakPredicate<const Klass*> {
+public:
+ LeakPredicate(bool class_unload) {}
+ bool operator()(const Klass* klass) {
+ assert(klass != NULL, "invariant");
+ return IS_LEAKP(klass) || is_implied(klass);
+ }
+};
+
typedef LeakPredicate<KlassPtr> LeakKlassPredicate;
typedef JfrPredicatedTypeWriterImplHost<KlassPtr, LeakKlassPredicate, write__klass__leakp> LeakKlassWriterImpl;
typedef JfrTypeWriterHost<LeakKlassWriterImpl, TYPE_CLASS> LeakKlassWriter;
@@ -809,6 +824,12 @@
_artifacts->tally(sw);
}
+static bool clear_artifacts = false;
+
+void JfrTypeSet::clear() {
+ clear_artifacts = true;
+}
+
typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits, false> ClearKlassAndMethods;
@@ -820,7 +841,7 @@
assert(_writer != NULL, "invariant");
ClearKlassAndMethods clear(_writer);
_artifacts->iterate_klasses(clear);
- _artifacts->clear();
+ JfrTypeSet::clear();
++checkpoint_id;
}
return total_count;
@@ -833,8 +854,9 @@
if (_artifacts == NULL) {
_artifacts = new JfrArtifactSet(class_unload);
} else {
- _artifacts->initialize(class_unload);
+ _artifacts->initialize(class_unload, clear_artifacts);
}
+ clear_artifacts = false;
assert(_artifacts != NULL, "invariant");
assert(!_artifacts->has_klass_entries(), "invariant");
}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -31,6 +31,7 @@
class JfrTypeSet : AllStatic {
public:
+ static void clear();
static size_t serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload);
};
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -35,6 +35,8 @@
_cstring_table(new CStringTable(this)),
_sym_list(NULL),
_cstring_list(NULL),
+ _sym_query(NULL),
+ _cstring_query(NULL),
_symbol_id_counter(1),
_class_unload(false) {
assert(_sym_table != NULL, "invariant");
@@ -66,9 +68,11 @@
assert(!_cstring_table->has_entries(), "invariant");
_sym_list = NULL;
- _cstring_list = NULL;
_symbol_id_counter = 1;
+ _sym_query = NULL;
+ _cstring_query = NULL;
+
assert(bootstrap != NULL, "invariant");
bootstrap->reset();
_cstring_list = bootstrap;
@@ -88,10 +92,10 @@
}
bool JfrSymbolId::on_equals(uintptr_t hash, const SymbolEntry* entry) {
- // query might be NULL
assert(entry != NULL, "invariant");
assert(entry->hash() == hash, "invariant");
- return true;
+ assert(_sym_query != NULL, "invariant");
+ return _sym_query == entry->literal();
}
void JfrSymbolId::on_unlink(const SymbolEntry* entry) {
@@ -99,18 +103,36 @@
const_cast<Symbol*>(entry->literal())->decrement_refcount();
}
+static const char* resource_to_cstring(const char* resource_str) {
+ assert(resource_str != NULL, "invariant");
+ const size_t length = strlen(resource_str);
+ char* const c_string = JfrCHeapObj::new_array<char>(length + 1);
+ assert(c_string != NULL, "invariant");
+ strncpy(c_string, resource_str, length + 1);
+ return c_string;
+}
+
void JfrSymbolId::on_link(const CStringEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(++_symbol_id_counter);
+ const_cast<CStringEntry*>(entry)->set_literal(resource_to_cstring(entry->literal()));
entry->set_list_next(_cstring_list);
_cstring_list = entry;
}
+static bool string_compare(const char* query, const char* candidate) {
+ assert(query != NULL, "invariant");
+ assert(candidate != NULL, "invariant");
+ const size_t length = strlen(query);
+ return strncmp(query, candidate, length) == 0;
+}
+
bool JfrSymbolId::on_equals(uintptr_t hash, const CStringEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry->hash() == hash, "invariant");
- return true;
+ assert(_cstring_query != NULL, "invariant");
+ return string_compare(_cstring_query, entry->literal());
}
void JfrSymbolId::on_unlink(const CStringEntry* entry) {
@@ -131,16 +153,10 @@
return mark((uintptr_t)symbol->identity_hash(), symbol, leakp);
}
-static unsigned int last_symbol_hash = 0;
-static traceid last_symbol_id = 0;
-
traceid JfrSymbolId::mark(uintptr_t hash, const Symbol* data, bool leakp) {
assert(data != NULL, "invariant");
assert(_sym_table != NULL, "invariant");
- if (hash == last_symbol_hash) {
- assert(last_symbol_id != 0, "invariant");
- return last_symbol_id;
- }
+ _sym_query = data;
const SymbolEntry& entry = _sym_table->lookup_put(hash, data);
if (_class_unload) {
entry.set_unloading();
@@ -148,21 +164,13 @@
if (leakp) {
entry.set_leakp();
}
- last_symbol_hash = hash;
- last_symbol_id = entry.id();
- return last_symbol_id;
+ return entry.id();
}
-static unsigned int last_cstring_hash = 0;
-static traceid last_cstring_id = 0;
-
traceid JfrSymbolId::mark(uintptr_t hash, const char* str, bool leakp) {
assert(str != NULL, "invariant");
assert(_cstring_table != NULL, "invariant");
- if (hash == last_cstring_hash) {
- assert(last_cstring_id != 0, "invariant");
- return last_cstring_id;
- }
+ _cstring_query = str;
const CStringEntry& entry = _cstring_table->lookup_put(hash, str);
if (_class_unload) {
entry.set_unloading();
@@ -170,9 +178,7 @@
if (leakp) {
entry.set_leakp();
}
- last_cstring_hash = hash;
- last_cstring_id = entry.id();
- return last_cstring_id;
+ return entry.id();
}
/*
@@ -202,7 +208,7 @@
sprintf(hash_buf, "/" UINTX_FORMAT, hash);
const size_t hash_len = strlen(hash_buf);
const size_t result_len = ik->name()->utf8_length();
- anonymous_symbol = JfrCHeapObj::new_array<char>(result_len + hash_len + 1);
+ anonymous_symbol = NEW_RESOURCE_ARRAY(char, result_len + hash_len + 1);
ik->name()->as_klass_external_name(anonymous_symbol, (int)result_len + 1);
assert(strlen(anonymous_symbol) == result_len, "invariant");
strcpy(anonymous_symbol + result_len, hash_buf);
@@ -215,21 +221,12 @@
return k->is_instance_klass() && ((const InstanceKlass*)k)->is_unsafe_anonymous();
}
-static unsigned int last_anonymous_hash = 0;
-static traceid last_anonymous_id = 0;
-
traceid JfrSymbolId::mark_unsafe_anonymous_klass_name(const InstanceKlass* ik, bool leakp) {
assert(ik != NULL, "invariant");
assert(ik->is_unsafe_anonymous(), "invariant");
const uintptr_t hash = unsafe_anonymous_klass_name_hash(ik);
- if (hash == last_anonymous_hash) {
- assert(last_anonymous_id != 0, "invariant");
- return last_anonymous_id;
- }
- last_anonymous_hash = hash;
- const CStringEntry* const entry = _cstring_table->lookup_only(hash);
- last_anonymous_id = entry != NULL ? entry->id() : mark(hash, create_unsafe_anonymous_klass_symbol(ik, hash), leakp);
- return last_anonymous_id;
+ const char* const anonymous_klass_symbol = create_unsafe_anonymous_klass_symbol(ik, hash);
+ return mark(hash, anonymous_klass_symbol, leakp);
}
traceid JfrSymbolId::mark(const Klass* k, bool leakp) {
@@ -249,23 +246,20 @@
return symbol_id;
}
-static void reset_symbol_caches() {
- last_anonymous_hash = 0;
- last_symbol_hash = 0;
- last_cstring_hash = 0;
-}
-
JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_id(new JfrSymbolId()),
- _klass_list(NULL),
- _total_count(0) {
+ _klass_list(NULL),
+ _total_count(0) {
initialize(class_unload);
assert(_klass_list != NULL, "invariant");
}
static const size_t initial_class_list_size = 200;
-void JfrArtifactSet::initialize(bool class_unload) {
+void JfrArtifactSet::initialize(bool class_unload, bool clear /* false */) {
assert(_symbol_id != NULL, "invariant");
+ if (clear) {
+ _symbol_id->clear();
+ }
_symbol_id->set_class_unload(class_unload);
_total_count = 0;
// resource allocation
@@ -273,13 +267,8 @@
}
JfrArtifactSet::~JfrArtifactSet() {
- clear();
+ _symbol_id->clear();
delete _symbol_id;
-}
-
-void JfrArtifactSet::clear() {
- reset_symbol_caches();
- _symbol_id->clear();
// _klass_list will be cleared by a ResourceMark
}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -222,6 +222,8 @@
CStringTable* _cstring_table;
const SymbolEntry* _sym_list;
const CStringEntry* _cstring_list;
+ const Symbol* _sym_query;
+ const char* _cstring_query;
traceid _symbol_id_counter;
bool _class_unload;
@@ -300,9 +302,7 @@
~JfrArtifactSet();
// caller needs ResourceMark
- void initialize(bool class_unload);
- void clear();
-
+ void initialize(bool class_unload, bool clear = false);
traceid mark(uintptr_t hash, const Symbol* sym, bool leakp);
traceid mark(const Klass* klass, bool leakp);
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -2330,6 +2330,16 @@
return true;
C2V_END
+C2V_VMENTRY_PREFIX(jlong, getCurrentJavaThread, (JNIEnv* env, jobject c2vm))
+ if (base_thread == NULL) {
+ // Called from unattached JVMCI shared library thread
+ return 0L;
+ }
+ JVMCITraceMark jtm("getCurrentJavaThread");
+ assert(base_thread->is_Java_thread(), "just checking");
+ return (jlong) p2i(base_thread);
+C2V_END
+
C2V_VMENTRY_PREFIX(jboolean, attachCurrentThread, (JNIEnv* env, jobject c2vm, jboolean as_daemon))
if (base_thread == NULL) {
// Called from unattached JVMCI shared library thread
@@ -2743,6 +2753,7 @@
{CC "deleteGlobalHandle", CC "(J)V", FN_PTR(deleteGlobalHandle)},
{CC "registerNativeMethods", CC "(" CLASS ")[J", FN_PTR(registerNativeMethods)},
{CC "isCurrentThreadAttached", CC "()Z", FN_PTR(isCurrentThreadAttached)},
+ {CC "getCurrentJavaThread", CC "()J", FN_PTR(getCurrentJavaThread)},
{CC "attachCurrentThread", CC "(Z)Z", FN_PTR(attachCurrentThread)},
{CC "detachCurrentThread", CC "()V", FN_PTR(detachCurrentThread)},
{CC "translate", CC "(" OBJECT ")J", FN_PTR(translate)},
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -180,6 +180,7 @@
nonstatic_field(JavaThread, _pending_transfer_to_interpreter, bool) \
nonstatic_field(JavaThread, _jvmci_counters, jlong*) \
nonstatic_field(JavaThread, _should_post_on_exceptions_flag, int) \
+ nonstatic_field(JavaThread, _jni_environment, JNIEnv) \
nonstatic_field(JavaThread, _reserved_stack_activation, address) \
\
static_field(java_lang_Class, _klass_offset, int) \
@@ -538,6 +539,7 @@
declare_constant(FieldInfo::field_slots) \
\
declare_constant(InstanceKlass::linked) \
+ declare_constant(InstanceKlass::being_initialized) \
declare_constant(InstanceKlass::fully_initialized) \
declare_constant(InstanceKlass::_misc_is_unsafe_anonymous) \
\
--- a/src/hotspot/share/memory/iterator.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/memory/iterator.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -59,7 +59,7 @@
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL && !nm->test_set_oops_do_mark()) {
+ if (nm != NULL && nm->oops_do_try_claim()) {
do_nmethod(nm);
}
}
--- a/src/hotspot/share/oops/instanceKlass.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/oops/instanceKlass.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -69,6 +69,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/biasedLocking.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
@@ -456,6 +457,12 @@
if (Arguments::is_dumping_archive()) {
SystemDictionaryShared::init_dumptime_info(this);
}
+
+ // Set biased locking bit for all instances of this class; it will be
+ // cleared if revocation occurs too often for this type
+ if (UseBiasedLocking && BiasedLocking::enabled()) {
+ set_prototype_header(markWord::biased_locking_prototype());
+ }
}
void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
@@ -2408,6 +2415,11 @@
// --> see ArrayKlass::complete_create_array_klass()
array_klasses()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
}
+
+ // Initialize current biased locking state.
+ if (UseBiasedLocking && BiasedLocking::enabled()) {
+ set_prototype_header(markWord::biased_locking_prototype());
+ }
}
// returns true IFF is_in_error_state() has been changed as a result of this call.
--- a/src/hotspot/share/oops/method.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/oops/method.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/metadataOnStackMark.hpp"
+#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
@@ -378,7 +379,83 @@
assert(valid_itable_index(), "");
}
+// The RegisterNatives call being attempted tried to register with a method that
+// is not native. Ask JVM TI what prefixes have been specified. Then check
+// to see if the native method is now wrapped with the prefixes. See the
+// SetNativeMethodPrefix(es) functions in the JVM TI Spec for details.
+static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, TRAPS) {
+#if INCLUDE_JVMTI
+ ResourceMark rm(THREAD);
+ Method* method;
+ int name_len = name->utf8_length();
+ char* name_str = name->as_utf8();
+ int prefix_count;
+ char** prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count);
+ for (int i = 0; i < prefix_count; i++) {
+ char* prefix = prefixes[i];
+ int prefix_len = (int)strlen(prefix);
+ // try adding this prefix to the method name and see if it matches another method name
+ int trial_len = name_len + prefix_len;
+ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1);
+ strcpy(trial_name_str, prefix);
+ strcat(trial_name_str, name_str);
+ TempNewSymbol trial_name = SymbolTable::probe(trial_name_str, trial_len);
+ if (trial_name == NULL) {
+ continue; // no such symbol, so this prefix wasn't used, try the next prefix
+ }
+ method = k->lookup_method(trial_name, signature);
+ if (method == NULL) {
+ continue; // signature doesn't match, try the next prefix
+ }
+ if (method->is_native()) {
+ method->set_is_prefixed_native();
+ return method; // wahoo, we found a prefixed version of the method, return it
+ }
+ // found as non-native, so prefix is good, add it, probably just need more prefixes
+ name_len = trial_len;
+ name_str = trial_name_str;
+ }
+#endif // INCLUDE_JVMTI
+ return NULL; // not found
+}
+
+bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address entry, TRAPS) {
+ Method* method = k->lookup_method(name, signature);
+ if (method == NULL) {
+ ResourceMark rm(THREAD);
+ stringStream st;
+ st.print("Method '");
+ print_external_name(&st, k, name, signature);
+ st.print("' name or signature does not match");
+ THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
+ }
+ if (!method->is_native()) {
+ // trying to register to a non-native method, see if a JVM TI agent has added prefix(es)
+ method = find_prefixed_native(k, name, signature, THREAD);
+ if (method == NULL) {
+ ResourceMark rm(THREAD);
+ stringStream st;
+ st.print("Method '");
+ print_external_name(&st, k, name, signature);
+ st.print("' is not declared as native");
+ THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
+ }
+ }
+
+ if (entry != NULL) {
+ method->set_native_function(entry, native_bind_event_is_interesting);
+ } else {
+ method->clear_native_function();
+ }
+ if (PrintJNIResolving) {
+ ResourceMark rm(THREAD);
+ tty->print_cr("[Registering JNI native method %s.%s]",
+ method->method_holder()->external_name(),
+ method->name()->as_C_string());
+ }
+ return true;
+}
bool Method::was_executed_more_than(int n) {
// Invocation counter is reset when the Method* is compiled.
--- a/src/hotspot/share/oops/method.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/oops/method.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -346,6 +346,12 @@
// InterpreterRuntime::exception_handler_for_exception.
static int fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_klass, int throw_bci, TRAPS);
+ static bool register_native(Klass* k,
+ Symbol* name,
+ Symbol* signature,
+ address entry,
+ TRAPS);
+
// method data access
MethodData* method_data() const {
return _method_data;
--- a/src/hotspot/share/oops/oopsHierarchy.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/oops/oopsHierarchy.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,8 +37,6 @@
Thread* t = Thread::current_or_null();
if (t != NULL && t->is_Java_thread()) {
frame fr = os::current_frame();
- // This points to the oop creator, I guess current frame points to caller
- assert (fr.pc(), "should point to a vm frame");
t->unhandled_oops()->register_unhandled_oop(this, fr.pc());
}
}
--- a/src/hotspot/share/opto/loopnode.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/opto/loopnode.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -1378,7 +1378,7 @@
bool exceeding_node_budget(uint required = 0) {
assert(C->live_nodes() < C->max_node_limit(), "sanity");
uint available = C->max_node_limit() - C->live_nodes();
- return available < required + _nodes_required;
+ return available < required + _nodes_required + REQUIRE_MIN;
}
uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
--- a/src/hotspot/share/opto/loopopts.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/opto/loopopts.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -3159,7 +3159,8 @@
Node_List worklist(area);
Node_List sink_list(area);
- if (!may_require_nodes(loop->est_loop_clone_sz(2))) {
+ uint estimate = loop->est_loop_clone_sz(1);
+ if (exceeding_node_budget(estimate)) {
return false;
}
@@ -3184,8 +3185,7 @@
// Set of non-cfg nodes to peel are those that are control
// dependent on the cfg nodes.
- uint i;
- for(i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *n = loop->_body.at(i);
Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
if (peel.test(n_c->_idx)) {
@@ -3200,7 +3200,7 @@
// Get a post order schedule of nodes in the peel region
// Result in right-most operand.
- scheduled_nodelist(loop, peel, peel_list );
+ scheduled_nodelist(loop, peel, peel_list);
assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
@@ -3220,25 +3220,21 @@
// Evacuate nodes in peel region into the not_peeled region if possible
uint new_phi_cnt = 0;
uint cloned_for_outside_use = 0;
- for (i = 0; i < peel_list.size();) {
+ for (uint i = 0; i < peel_list.size();) {
Node* n = peel_list.at(i);
#ifndef PRODUCT
if (TracePartialPeeling) n->dump();
#endif
bool incr = true;
- if ( !n->is_CFG() ) {
-
- if ( has_use_in_set(n, not_peel) ) {
-
+ if (!n->is_CFG()) {
+ if (has_use_in_set(n, not_peel)) {
// If not used internal to the peeled region,
// move "n" from peeled to not_peeled region.
-
- if ( !has_use_internal_to_set(n, peel, loop) ) {
-
+ if (!has_use_internal_to_set(n, peel, loop)) {
// if not pinned and not a load (which maybe anti-dependent on a store)
// and not a CMove (Matcher expects only bool->cmove).
if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
- cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
+ cloned_for_outside_use += clone_for_use_outside_loop(loop, n, worklist);
sink_list.push(n);
peel >>= n->_idx; // delete n from peel set.
not_peel <<= n->_idx; // add n to not_peel set.
@@ -3254,7 +3250,7 @@
} else {
// Otherwise check for special def-use cases that span
// the peel/not_peel boundary such as bool->if
- clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist );
+ clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
new_phi_cnt++;
}
}
@@ -3262,7 +3258,11 @@
if (incr) i++;
}
- if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) {
+ estimate += cloned_for_outside_use + new_phi_cnt;
+ bool exceed_node_budget = !may_require_nodes(estimate);
+ bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
+
+ if (exceed_node_budget || exceed_phi_limit) {
#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
@@ -3310,7 +3310,7 @@
const uint clone_exit_idx = 1;
const uint orig_exit_idx = 2;
- assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop");
+ assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
Node* head_clone = old_new[head->_idx];
LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
@@ -3318,7 +3318,7 @@
// Add phi if "def" node is in peel set and "use" is not
- for(i = 0; i < peel_list.size(); i++ ) {
+ for (uint i = 0; i < peel_list.size(); i++) {
Node *def = peel_list.at(i);
if (!def->is_CFG()) {
for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
@@ -3374,7 +3374,7 @@
// cloned-not_peeled in(0) in(0)
// orig-peeled
- for(i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *n = loop->_body.at(i);
if (!n->is_CFG() && n->in(0) != NULL &&
not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
--- a/src/hotspot/share/prims/jni.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/prims/jni.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -2909,89 +2909,6 @@
HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN())
-//
-// Interception of natives
-//
-
-// The RegisterNatives call being attempted tried to register with a method that
-// is not native. Ask JVM TI what prefixes have been specified. Then check
-// to see if the native method is now wrapped with the prefixes. See the
-// SetNativeMethodPrefix(es) functions in the JVM TI Spec for details.
-static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, TRAPS) {
-#if INCLUDE_JVMTI
- ResourceMark rm(THREAD);
- Method* method;
- int name_len = name->utf8_length();
- char* name_str = name->as_utf8();
- int prefix_count;
- char** prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count);
- for (int i = 0; i < prefix_count; i++) {
- char* prefix = prefixes[i];
- int prefix_len = (int)strlen(prefix);
-
- // try adding this prefix to the method name and see if it matches another method name
- int trial_len = name_len + prefix_len;
- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1);
- strcpy(trial_name_str, prefix);
- strcat(trial_name_str, name_str);
- TempNewSymbol trial_name = SymbolTable::probe(trial_name_str, trial_len);
- if (trial_name == NULL) {
- continue; // no such symbol, so this prefix wasn't used, try the next prefix
- }
- method = k->lookup_method(trial_name, signature);
- if (method == NULL) {
- continue; // signature doesn't match, try the next prefix
- }
- if (method->is_native()) {
- method->set_is_prefixed_native();
- return method; // wahoo, we found a prefixed version of the method, return it
- }
- // found as non-native, so prefix is good, add it, probably just need more prefixes
- name_len = trial_len;
- name_str = trial_name_str;
- }
-#endif // INCLUDE_JVMTI
- return NULL; // not found
-}
-
-static bool register_native(Klass* k, Symbol* name, Symbol* signature, address entry, TRAPS) {
- Method* method = k->lookup_method(name, signature);
- if (method == NULL) {
- ResourceMark rm;
- stringStream st;
- st.print("Method '");
- Method::print_external_name(&st, k, name, signature);
- st.print("' name or signature does not match");
- THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
- }
- if (!method->is_native()) {
- // trying to register to a non-native method, see if a JVM TI agent has added prefix(es)
- method = find_prefixed_native(k, name, signature, THREAD);
- if (method == NULL) {
- ResourceMark rm;
- stringStream st;
- st.print("Method '");
- Method::print_external_name(&st, k, name, signature);
- st.print("' is not declared as native");
- THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
- }
- }
-
- if (entry != NULL) {
- method->set_native_function(entry,
- Method::native_bind_event_is_interesting);
- } else {
- method->clear_native_function();
- }
- if (PrintJNIResolving) {
- ResourceMark rm(THREAD);
- tty->print_cr("[Registering JNI native method %s.%s]",
- method->method_holder()->external_name(),
- method->name()->as_C_string());
- }
- return true;
-}
-
DT_RETURN_MARK_DECL(RegisterNatives, jint
, HOTSPOT_JNI_REGISTERNATIVES_RETURN(_ret_ref));
@@ -3024,8 +2941,8 @@
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), -1);
}
- bool res = register_native(k, name, signature,
- (address) methods[index].fnPtr, THREAD);
+ bool res = Method::register_native(k, name, signature,
+ (address) methods[index].fnPtr, THREAD);
if (!res) {
ret = -1;
break;
--- a/src/hotspot/share/runtime/arguments.cpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/runtime/arguments.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -548,6 +548,7 @@
{ "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "CompilationPolicyChoice", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "TraceNMethodInstalls", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "FailOverToOldVerifier", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
{ "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
@@ -593,7 +594,6 @@
{ "TraceSafepointCleanupTime", LogLevel::Info, true, LOG_TAGS(safepoint, cleanup) },
{ "TraceJVMTIObjectTagging", LogLevel::Debug, true, LOG_TAGS(jvmti, objecttagging) },
{ "TraceRedefineClasses", LogLevel::Info, false, LOG_TAGS(redefine, class) },
- { "TraceNMethodInstalls", LogLevel::Info, true, LOG_TAGS(nmethod, install) },
{ NULL, LogLevel::Off, false, LOG_TAGS(_NO_TAG) }
};
--- a/src/hotspot/share/runtime/globals.hpp Thu Oct 24 17:14:42 2019 -0400
+++ b/src/hotspot/share/runtime/globals.hpp Mon Oct 28 11:21:43 2019 -0400
@@ -563,9 +563,6 @@
product(bool, PrintExtendedThreadInfo, false, \
"Print more information in thread dump") \
\
- diagnostic(bool, TraceNMethodInstalls, false, \
- "Trace nmethod installation") \
- \
diagnostic(intx, ScavengeRootsInCode, 2, \
"0: do not allow scavengable oops in the code cache; " \
"1: allow scavenging from the code cache; " \
--- a/src/java.base/share/classes/java/lang/Object.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/java/lang/Object.java Mon Oct 28 11:21:43 2019 -0400
@@ -38,11 +38,6 @@
*/
public class Object {
- private static native void registerNatives();
- static {
- registerNatives();
- }
-
/**
* Constructs a new object.
*/
--- a/src/java.base/share/classes/java/lang/Thread.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/java/lang/Thread.java Mon Oct 28 11:21:43 2019 -0400
@@ -1075,7 +1075,7 @@
* <a href="{@docRoot}/java.base/java/lang/doc-files/threadPrimitiveDeprecation.html">Why
* are Thread.stop, Thread.suspend and Thread.resume Deprecated?</a>.
*/
- @Deprecated(since="1.2")
+ @Deprecated(since="1.2", forRemoval=true)
public final void suspend() {
checkAccess();
suspend0();
@@ -1101,7 +1101,7 @@
* <a href="{@docRoot}/java.base/java/lang/doc-files/threadPrimitiveDeprecation.html">Why
* are Thread.stop, Thread.suspend and Thread.resume Deprecated?</a>.
*/
- @Deprecated(since="1.2")
+ @Deprecated(since="1.2", forRemoval=true)
public final void resume() {
checkAccess();
resume0();
--- a/src/java.base/share/classes/java/lang/ThreadGroup.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/java/lang/ThreadGroup.java Mon Oct 28 11:21:43 2019 -0400
@@ -666,8 +666,8 @@
* @deprecated This method is inherently deadlock-prone. See
* {@link Thread#suspend} for details.
*/
- @Deprecated(since="1.2")
- @SuppressWarnings("deprecation")
+ @Deprecated(since="1.2", forRemoval=true)
+ @SuppressWarnings("removal")
public final void suspend() {
if (stopOrSuspend(true))
Thread.currentThread().suspend();
@@ -680,7 +680,7 @@
* if (and only if) the current thread is found to be in this thread
* group or one of its subgroups.
*/
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
private boolean stopOrSuspend(boolean suspend) {
boolean suicide = false;
Thread us = Thread.currentThread();
@@ -729,8 +729,8 @@
* both of which have been deprecated, as they are inherently
* deadlock-prone. See {@link Thread#suspend} for details.
*/
- @Deprecated(since="1.2")
- @SuppressWarnings("deprecation")
+ @Deprecated(since="1.2", forRemoval=true)
+ @SuppressWarnings("removal")
public final void resume() {
int ngroupsSnapshot;
ThreadGroup[] groupsSnapshot;
@@ -1070,7 +1070,7 @@
* which is deprecated. Further, the behavior of this call
* was never specified.
*/
- @Deprecated(since="1.2")
+ @Deprecated(since="1.2", forRemoval=true)
public boolean allowThreadSuspension(boolean b) {
return true;
}
--- a/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Mon Oct 28 11:21:43 2019 -0400
@@ -305,8 +305,10 @@
* If the type of the given remote address is not supported
*
* @throws SecurityException
- * If a security manager has been installed
- * and it does not permit access to the given remote address
+ * If a security manager has been installed and it does not
+ * permit access to the given remote address, or if unbound,
+ * the security manager {@link SecurityManager#checkListen checkListen}
+ * method denies the operation
*
* @throws IOException
* If some other I/O error occurs
@@ -409,6 +411,11 @@
* closing the channel and setting the current thread's
* interrupt status
*
+ * @throws SecurityException
+ * If unbound, and a security manager has been installed and
+ * its {@link SecurityManager#checkListen checkListen} method
+ * denies the operation
+ *
* @throws IOException
* If some other I/O error occurs
*/
@@ -480,9 +487,10 @@
* If the type of the given remote address is not supported
*
* @throws SecurityException
- * If a security manager has been installed
- * and it does not permit datagrams to be sent
- * to the given address
+ * If a security manager has been installed and it does not permit
+ * datagrams to be sent to the given address, or if unbound, and
+ * the security manager's {@link SecurityManager#checkListen checkListen}
+ * method denies the operation
*
* @throws IOException
* If some other I/O error occurs
--- a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Mon Oct 28 11:21:43 2019 -0400
@@ -27,6 +27,8 @@
import java.io.FileDescriptor;
import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.lang.ref.Cleaner.Cleanable;
import java.net.DatagramSocket;
import java.net.Inet4Address;
import java.net.Inet6Address;
@@ -55,6 +57,7 @@
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
+import jdk.internal.ref.CleanerFactory;
import sun.net.ResourceManager;
import sun.net.ext.ExtendedSocketOptions;
import sun.net.util.IPAddressUtil;
@@ -68,7 +71,7 @@
implements SelChImpl
{
// Used to make native read and write calls
- private static NativeDispatcher nd = new DatagramDispatcher();
+ private static final NativeDispatcher nd = new DatagramDispatcher();
// The protocol family of the socket
private final ProtocolFamily family;
@@ -76,6 +79,7 @@
// Our file descriptor
private final FileDescriptor fd;
private final int fdVal;
+ private final Cleanable cleaner;
// Cached InetAddress and port for unconnected DatagramChannels
// used by receive0
@@ -138,6 +142,7 @@
ResourceManager.afterUdpClose();
throw ioe;
}
+ this.cleaner = CleanerFactory.cleaner().register(this, closerFor(fd));
}
public DatagramChannelImpl(SelectorProvider sp, ProtocolFamily family)
@@ -164,6 +169,7 @@
ResourceManager.afterUdpClose();
throw ioe;
}
+ this.cleaner = CleanerFactory.cleaner().register(this, closerFor(fd));
}
public DatagramChannelImpl(SelectorProvider sp, FileDescriptor fd)
@@ -179,6 +185,7 @@
: StandardProtocolFamily.INET;
this.fd = fd;
this.fdVal = IOUtil.fdVal(fd);
+ this.cleaner = CleanerFactory.cleaner().register(this, closerFor(fd));
synchronized (stateLock) {
this.localAddress = Net.localAddress(fd);
}
@@ -1181,10 +1188,10 @@
if ((readerThread == 0) && (writerThread == 0) && !isRegistered()) {
state = ST_CLOSED;
try {
- nd.close(fd);
- } finally {
- // notify resource manager
- ResourceManager.afterUdpClose();
+ // close socket
+ cleaner.clean();
+ } catch (UncheckedIOException ioe) {
+ throw ioe.getCause();
}
return true;
} else {
@@ -1283,13 +1290,6 @@
}
}
- @SuppressWarnings("deprecation")
- protected void finalize() throws IOException {
- // fd is null if constructor threw exception
- if (fd != null)
- close();
- }
-
/**
* Translates native poll revent set into a ready operation set
*/
@@ -1377,6 +1377,21 @@
return fdVal;
}
+ /**
+ * Returns an action to close the given file descriptor.
+ */
+ private static Runnable closerFor(FileDescriptor fd) {
+ return () -> {
+ try {
+ nd.close(fd);
+ } catch (IOException ioe) {
+ throw new UncheckedIOException(ioe);
+ } finally {
+ // decrement
+ ResourceManager.afterUdpClose();
+ }
+ };
+ }
// -- Native methods --
--- a/src/java.base/share/classes/sun/nio/ch/NioSocketImpl.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/sun/nio/ch/NioSocketImpl.java Mon Oct 28 11:21:43 2019 -0400
@@ -30,8 +30,7 @@
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
-import java.lang.invoke.MethodHandles;
-import java.lang.invoke.VarHandle;
+import java.lang.ref.Cleaner.Cleanable;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ProtocolFamily;
@@ -106,7 +105,7 @@
// set by SocketImpl.create, protected by stateLock
private boolean stream;
- private FileDescriptorCloser closer;
+ private Cleanable cleaner;
// set to true when the socket is in non-blocking mode
private volatile boolean nonBlocking;
@@ -471,9 +470,10 @@
ResourceManager.afterUdpClose();
throw ioe;
}
+ Runnable closer = closerFor(fd, stream);
this.fd = fd;
this.stream = stream;
- this.closer = FileDescriptorCloser.create(this);
+ this.cleaner = CleanerFactory.cleaner().register(this, closer);
this.state = ST_UNCONNECTED;
}
}
@@ -777,10 +777,11 @@
}
// set the fields
+ Runnable closer = closerFor(newfd, true);
synchronized (nsi.stateLock) {
nsi.fd = newfd;
nsi.stream = true;
- nsi.closer = FileDescriptorCloser.create(nsi);
+ nsi.cleaner = CleanerFactory.cleaner().register(nsi, closer);
nsi.localport = localAddress.getPort();
nsi.address = isaa[0].getAddress();
nsi.port = isaa[0].getPort();
@@ -850,7 +851,7 @@
assert Thread.holdsLock(stateLock) && state == ST_CLOSING;
if (readerThread == 0 && writerThread == 0) {
try {
- closer.run();
+ cleaner.clean();
} catch (UncheckedIOException ioe) {
throw ioe.getCause();
} finally {
@@ -1193,53 +1194,28 @@
}
/**
- * A task that closes a SocketImpl's file descriptor. The task runs when the
- * SocketImpl is explicitly closed and when the SocketImpl becomes phantom
- * reachable.
+ * Returns an action to close the given file descriptor.
*/
- private static class FileDescriptorCloser implements Runnable {
- private static final VarHandle CLOSED;
- static {
- try {
- MethodHandles.Lookup l = MethodHandles.lookup();
- CLOSED = l.findVarHandle(FileDescriptorCloser.class,
- "closed",
- boolean.class);
- } catch (Exception e) {
- throw new InternalError(e);
- }
- }
-
- private final FileDescriptor fd;
- private final boolean stream;
- private volatile boolean closed;
-
- FileDescriptorCloser(FileDescriptor fd, boolean stream) {
- this.fd = fd;
- this.stream = stream;
- }
-
- static FileDescriptorCloser create(NioSocketImpl impl) {
- assert Thread.holdsLock(impl.stateLock);
- var closer = new FileDescriptorCloser(impl.fd, impl.stream);
- CleanerFactory.cleaner().register(impl, closer);
- return closer;
- }
-
- @Override
- public void run() {
- if (CLOSED.compareAndSet(this, false, true)) {
+ private static Runnable closerFor(FileDescriptor fd, boolean stream) {
+ if (stream) {
+ return () -> {
+ try {
+ nd.close(fd);
+ } catch (IOException ioe) {
+ throw new UncheckedIOException(ioe);
+ }
+ };
+ } else {
+ return () -> {
try {
nd.close(fd);
} catch (IOException ioe) {
throw new UncheckedIOException(ioe);
} finally {
- if (!stream) {
- // decrement
- ResourceManager.afterUdpClose();
- }
+ // decrement
+ ResourceManager.afterUdpClose();
}
- }
+ };
}
}
--- a/src/java.base/share/classes/sun/security/tools/KeyStoreUtil.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/classes/sun/security/tools/KeyStoreUtil.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -304,14 +304,17 @@
public static void loadProviderByClass(
String provClass, String arg, ClassLoader cl) {
- // For compatibility, SunPKCS11 and OracleUcrypto can still be
- // loadable with -providerClass.
+ // For compatibility, SunPKCS11, OracleUcrypto, and SunMSCAPI
+ // can still be loadable with -providerClass.
if (provClass.equals("sun.security.pkcs11.SunPKCS11")) {
loadProviderByName("SunPKCS11", arg);
return;
} else if (provClass.equals("com.oracle.security.crypto.UcryptoProvider")) {
loadProviderByName("OracleUcrypto", arg);
return;
+ } else if (provClass.equals("sun.security.mscapi.SunMSCAPI")) {
+ loadProviderByName("SunMSCAPI", arg);
+ return;
}
Provider prov;
--- a/src/java.base/share/native/libjava/Object.c Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.base/share/native/libjava/Object.c Mon Oct 28 11:21:43 2019 -0400
@@ -39,21 +39,6 @@
#include "java_lang_Object.h"
-static JNINativeMethod methods[] = {
- {"hashCode", "()I", (void *)&JVM_IHashCode},
- {"wait", "(J)V", (void *)&JVM_MonitorWait},
- {"notify", "()V", (void *)&JVM_MonitorNotify},
- {"notifyAll", "()V", (void *)&JVM_MonitorNotifyAll},
- {"clone", "()Ljava/lang/Object;", (void *)&JVM_Clone},
-};
-
-JNIEXPORT void JNICALL
-Java_java_lang_Object_registerNatives(JNIEnv *env, jclass cls)
-{
- (*env)->RegisterNatives(env, cls,
- methods, sizeof(methods)/sizeof(methods[0]));
-}
-
JNIEXPORT jclass JNICALL
Java_java_lang_Object_getClass(JNIEnv *env, jobject this)
{
--- a/src/java.management.rmi/share/classes/com/sun/jmx/remote/internal/rmi/ProxyRef.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management.rmi/share/classes/com/sun/jmx/remote/internal/rmi/ProxyRef.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@
import java.rmi.server.RemoteRef;
-@SuppressWarnings("deprecation")
+@SuppressWarnings({"deprecation",
+ "serial"}) // Externalizable class w/o no-arg c'tor
public class ProxyRef implements RemoteRef {
private static final long serialVersionUID = -6503061366316814723L;
--- a/src/java.management.rmi/share/classes/javax/management/remote/rmi/RMIConnector.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management.rmi/share/classes/javax/management/remote/rmi/RMIConnector.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2245,6 +2245,7 @@
*
* @see #RMIConnector(RMIServer,Map)
**/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final RMIServer rmiServer;
/**
--- a/src/java.management/share/classes/javax/management/Attribute.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/Attribute.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
/**
* @serial Attribute value
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Object value= null;
--- a/src/java.management/share/classes/javax/management/AttributeChangeNotification.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/AttributeChangeNotification.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,11 +74,13 @@
/**
* @serial The MBean attribute old value.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Object oldValue = null;
/**
* @serial The MBean attribute new value.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Object newValue = null;
--- a/src/java.management/share/classes/javax/management/BadAttributeValueExpException.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/BadAttributeValueExpException.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
* @serial A string representation of the attribute that originated this exception.
* for example, the string value can be the return of {@code attribute.toString()}
*/
+ @SuppressWarnings("serial") // See handling in constructor and readObject
private Object val;
/**
--- a/src/java.management/share/classes/javax/management/ImmutableDescriptor.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/ImmutableDescriptor.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@
* elements in this array match the corresponding elements in the
* {@code names} array.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Object[] values;
private transient int hashCode = -1;
--- a/src/java.management/share/classes/javax/management/InvalidApplicationException.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/InvalidApplicationException.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
/**
* @serial The object representing the class of the MBean
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object val;
--- a/src/java.management/share/classes/javax/management/NotificationFilterSupport.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/NotificationFilterSupport.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,7 @@
* @serial {@link Vector} that contains the enabled notification types.
* The default value is an empty vector.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private List<String> enabledTypes = new Vector<String>();
--- a/src/java.management/share/classes/javax/management/loading/PrivateMLet.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/loading/PrivateMLet.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
*
* @since 1.5
*/
+@SuppressWarnings("serial") // Externalizable class w/o no-arg c'tor
public class PrivateMLet extends MLet implements PrivateClassLoader {
private static final long serialVersionUID = 2503458973393711979L;
--- a/src/java.management/share/classes/javax/management/monitor/MonitorNotification.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/monitor/MonitorNotification.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -162,6 +162,7 @@
/**
* @serial Monitor notification observed object.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private ObjectName observedObject = null;
/**
@@ -172,6 +173,7 @@
/**
* @serial Monitor notification derived gauge.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object derivedGauge = null;
/**
@@ -179,6 +181,7 @@
* This value is used to keep the threshold/string (depending on the
* monitor type) that triggered off this notification.
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object trigger = null;
--- a/src/java.management/share/classes/javax/management/openmbean/CompositeDataSupport.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/openmbean/CompositeDataSupport.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,7 @@
* respective values.
* A {@link SortedMap} is used for faster retrieval of elements.
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final SortedMap<String, Object> contents;
/**
--- a/src/java.management/share/classes/javax/management/openmbean/OpenMBeanAttributeInfoSupport.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/openmbean/OpenMBeanAttributeInfoSupport.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,27 +64,32 @@
/**
* @serial The open mbean attribute's <i>open type</i>
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private OpenType<?> openType;
/**
* @serial The open mbean attribute's default value
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final Object defaultValue;
/**
* @serial The open mbean attribute's legal values. This {@link
* Set} is unmodifiable
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Set<?> legalValues; // to be constructed unmodifiable
/**
* @serial The open mbean attribute's min value
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Comparable<?> minValue;
/**
* @serial The open mbean attribute's max value
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private final Comparable<?> maxValue;
--- a/src/java.management/share/classes/javax/management/openmbean/OpenMBeanParameterInfoSupport.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/openmbean/OpenMBeanParameterInfoSupport.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,27 +58,32 @@
/**
* @serial The open mbean parameter's <i>open type</i>
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private OpenType<?> openType;
/**
* @serial The open mbean parameter's default value
*/
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private Object defaultValue = null;
/**
* @serial The open mbean parameter's legal values. This {@link
* Set} is unmodifiable
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Set<?> legalValues = null; // to be constructed unmodifiable
/**
* @serial The open mbean parameter's min value
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Comparable<?> minValue = null;
/**
* @serial The open mbean parameter's max value
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private Comparable<?> maxValue = null;
--- a/src/java.management/share/classes/javax/management/openmbean/TabularDataSupport.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/openmbean/TabularDataSupport.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,6 +86,7 @@
* @serial This tabular data instance's contents: a {@link HashMap}
*/
// field cannot be final because of clone method
+ @SuppressWarnings("serial") // Conditionally serializable
private Map<Object,CompositeData> dataMap;
/**
--- a/src/java.management/share/classes/javax/management/openmbean/TabularType.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/javax/management/openmbean/TabularType.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
* @serial The items used to index each row element, kept in the order the user gave
* This is an unmodifiable {@link ArrayList}
*/
+ @SuppressWarnings("serial") // Conditionally serializable
private List<String> indexNames;
--- a/src/java.management/share/classes/sun/management/LazyCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/LazyCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
public abstract class LazyCompositeData
implements CompositeData, Serializable {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private CompositeData compositeData;
// Implementation of the CompositeData interface
--- a/src/java.management/share/classes/sun/management/LockInfoCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/LockInfoCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
* construction of a CompositeData use in the local case.
*/
public class LockInfoCompositeData extends LazyCompositeData {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final LockInfo lock;
private LockInfoCompositeData(LockInfo li) {
--- a/src/java.management/share/classes/sun/management/MemoryNotifInfoCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/MemoryNotifInfoCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
* construction of a CompositeData use in the local case.
*/
public class MemoryNotifInfoCompositeData extends LazyCompositeData {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final MemoryNotificationInfo memoryNotifInfo;
private MemoryNotifInfoCompositeData(MemoryNotificationInfo info) {
--- a/src/java.management/share/classes/sun/management/MemoryUsageCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/MemoryUsageCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
* construction of a CompositeData use in the local case.
*/
public class MemoryUsageCompositeData extends LazyCompositeData {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final MemoryUsage usage;
private MemoryUsageCompositeData(MemoryUsage u) {
--- a/src/java.management/share/classes/sun/management/MonitorInfoCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/MonitorInfoCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
* construction of a CompositeData use in the local case.
*/
public class MonitorInfoCompositeData extends LazyCompositeData {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final MonitorInfo lock;
private MonitorInfoCompositeData(MonitorInfo mi) {
--- a/src/java.management/share/classes/sun/management/ThreadInfoCompositeData.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/ThreadInfoCompositeData.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,9 @@
* construction of a CompositeData use in the local case.
*/
public class ThreadInfoCompositeData extends LazyCompositeData {
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final ThreadInfo threadInfo;
+ @SuppressWarnings("serial") // Not statically typed as Serializable
private final CompositeData cdata;
private ThreadInfoCompositeData(ThreadInfo ti) {
--- a/src/java.management/share/classes/sun/management/counter/perf/PerfByteArrayCounter.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/counter/perf/PerfByteArrayCounter.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
public class PerfByteArrayCounter extends AbstractCounter
implements ByteArrayCounter {
+ @SuppressWarnings("serial") // Value indirectly copied as a byte[] in writeReplace
ByteBuffer bb;
PerfByteArrayCounter(String name, Units u, Variability v,
--- a/src/java.management/share/classes/sun/management/counter/perf/PerfLongArrayCounter.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/counter/perf/PerfLongArrayCounter.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
public class PerfLongArrayCounter extends AbstractCounter
implements LongArrayCounter {
+
+ @SuppressWarnings("serial") // Value indirectly copied as a long[] in writeReplace
LongBuffer lb;
PerfLongArrayCounter(String name, Units u, Variability v,
--- a/src/java.management/share/classes/sun/management/counter/perf/PerfLongCounter.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.management/share/classes/sun/management/counter/perf/PerfLongCounter.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
public class PerfLongCounter extends AbstractCounter
implements LongCounter {
+ @SuppressWarnings("serial") // Value indirectly copied as a long[] in writeReplace
LongBuffer lb;
// package private
--- a/src/java.xml/share/legal/bcel.md Thu Oct 24 17:14:42 2019 -0400
+++ b/src/java.xml/share/legal/bcel.md Mon Oct 28 11:21:43 2019 -0400
@@ -1,4 +1,4 @@
-## Apache Commons Byte Code Engineering Library (BCEL) Version 6.0
+## Apache Commons Byte Code Engineering Library (BCEL) Version 6.3.1
### Apache Commons BCEL Notice
<pre>
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java Mon Oct 28 11:21:43 2019 -0400
@@ -967,6 +967,11 @@
native boolean isCurrentThreadAttached();
/**
+ * @see HotSpotJVMCIRuntime#getCurrentJavaThread()
+ */
+ native long getCurrentJavaThread();
+
+ /**
* @see HotSpotJVMCIRuntime#attachCurrentThread
*/
native boolean attachCurrentThread(boolean asDaemon);
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java Mon Oct 28 11:21:43 2019 -0400
@@ -999,6 +999,14 @@
}
/**
+ * Gets the address of the HotSpot {@code JavaThread} C++ object for the current thread. This
+ * will return {@code 0} if called from an unattached JVMCI shared library thread.
+ */
+ public long getCurrentJavaThread() {
+ return compilerToVm.getCurrentJavaThread();
+ }
+
+ /**
* Ensures the current thread is attached to the peer runtime.
*
* @param asDaemon if the thread is not yet attached, should it be attached as a daemon
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaType.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaType.java Mon Oct 28 11:21:43 2019 -0400
@@ -52,4 +52,13 @@
}
return arrayOfType;
}
+
+ /**
+ * Checks whether this type is currently being initialized. If a type is being initialized it
+ * implies that it was {@link #isLinked() linked} and that the static initializer is currently
+ * being run.
+ *
+ * @return {@code true} if this type is being initialized
+ */
+ abstract boolean isBeingInitialized();
}
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java Mon Oct 28 11:21:43 2019 -0400
@@ -360,6 +360,11 @@
}
@Override
+ public boolean isBeingInitialized() {
+ return isArray() ? false : getInitState() == config().instanceKlassStateBeingInitialized;
+ }
+
+ @Override
public boolean isLinked() {
return isArray() ? true : getInitState() >= config().instanceKlassStateLinked;
}
@@ -379,7 +384,7 @@
public void initialize() {
if (!isInitialized()) {
runtime().compilerToVm.ensureInitialized(this);
- assert isInitialized();
+ assert isInitialized() || isBeingInitialized();
}
}
@@ -578,11 +583,6 @@
return new AssumptionResult<>(resolvedMethod);
}
- if (resolvedMethod.canBeStaticallyBound()) {
- // No assumptions are required.
- return new AssumptionResult<>(resolvedMethod);
- }
-
ResolvedJavaMethod result = resolvedMethod.uniqueConcreteMethod(this);
if (result != null) {
return new AssumptionResult<>(result, new ConcreteMethod(method, this, result));
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedPrimitiveType.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedPrimitiveType.java Mon Oct 28 11:21:43 2019 -0400
@@ -150,6 +150,11 @@
}
@Override
+ public boolean isBeingInitialized() {
+ return false;
+ }
+
+ @Override
public boolean isLinked() {
return true;
}
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotSpeculationEncoding.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotSpeculationEncoding.java Mon Oct 28 11:21:43 2019 -0400
@@ -29,7 +29,6 @@
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
-import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.SpeculationLog.SpeculationReasonEncoding;
@@ -37,8 +36,8 @@
/**
* Implements a {@link SpeculationReasonEncoding} that {@linkplain #getByteArray() produces} a byte
* array. Data is added via a {@link DataOutputStream}. When producing the final byte array, if the
- * total length of data exceeds the length of a SHA-1 digest, then a SHA-1 digest of the data is
- * produced instead.
+ * total length of data exceeds the length of a SHA-1 digest and a SHA-1 digest algorithm is
+ * available, then a SHA-1 digest of the data is produced instead.
*/
final class HotSpotSpeculationEncoding extends ByteArrayOutputStream implements SpeculationReasonEncoding {
@@ -152,21 +151,33 @@
}
/**
- * Prototype SHA1 digest that is cloned before use.
+ * Prototype SHA1 digest.
*/
- private static final MessageDigest SHA1 = getSHA1();
- private static final int SHA1_LENGTH = SHA1.getDigestLength();
+ private static final MessageDigest SHA1;
- private static MessageDigest getSHA1() {
+ /**
+ * Cloning the prototype is quicker than calling {@link MessageDigest#getInstance(String)} every
+ * time.
+ */
+ private static final boolean SHA1_IS_CLONEABLE;
+ private static final int SHA1_LENGTH;
+
+ static {
+ MessageDigest sha1 = null;
+ boolean sha1IsCloneable = false;
try {
- MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+ sha1 = MessageDigest.getInstance("SHA-1");
sha1.clone();
- return sha1;
- } catch (CloneNotSupportedException | NoSuchAlgorithmException e) {
+ sha1IsCloneable = true;
+ } catch (NoSuchAlgorithmException e) {
// Should never happen given that SHA-1 is mandated in a
- // compliant Java platform implementation.
- throw new JVMCIError("Expect a cloneable implementation of a SHA-1 message digest to be available", e);
+ // compliant Java platform implementation. However, be
+ // conservative and fall back to not using a digest.
+ } catch (CloneNotSupportedException e) {
}
+ SHA1 = sha1;
+ SHA1_IS_CLONEABLE = sha1IsCloneable;
+ SHA1_LENGTH = SHA1 == null ? 20 : SHA1.getDigestLength();
}
/**
@@ -175,12 +186,12 @@
*/
byte[] getByteArray() {
if (result == null) {
- if (count > SHA1_LENGTH) {
+ if (SHA1 != null && count > SHA1_LENGTH) {
try {
- MessageDigest md = (MessageDigest) SHA1.clone();
+ MessageDigest md = SHA1_IS_CLONEABLE ? (MessageDigest) SHA1.clone() : MessageDigest.getInstance("SHA-1");
md.update(buf, 0, count);
result = md.digest();
- } catch (CloneNotSupportedException e) {
+ } catch (CloneNotSupportedException | NoSuchAlgorithmException e) {
throw new InternalError(e);
}
} else {
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Mon Oct 28 11:21:43 2019 -0400
@@ -107,6 +107,7 @@
final int instanceKlassStateLinked = getConstant("InstanceKlass::linked", Integer.class);
final int instanceKlassStateFullyInitialized = getConstant("InstanceKlass::fully_initialized", Integer.class);
+ final int instanceKlassStateBeingInitialized = getConstant("InstanceKlass::being_initialized", Integer.class);
final int instanceKlassMiscIsUnsafeAnonymous = getConstant("InstanceKlass::_misc_is_unsafe_anonymous", Integer.class);
final int annotationsFieldAnnotationsOffset = getFieldOffset("Annotations::_fields_annotations", Integer.class, "Array<AnnotationArray*>*");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/SharedHotSpotSpeculationLog.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.vm.ci.hotspot;
+
+/**
+ * A wrapper that holds a strong reference to a "master" speculation log that
+ * {@linkplain HotSpotSpeculationLog#managesFailedSpeculations() manages} the failed speculations
+ * list.
+ */
+public class SharedHotSpotSpeculationLog extends HotSpotSpeculationLog {
+ private final HotSpotSpeculationLog masterLog;
+
+ public SharedHotSpotSpeculationLog(HotSpotSpeculationLog masterLog) {
+ super(masterLog.getFailedSpeculationsAddress());
+ this.masterLog = masterLog;
+ }
+
+ @Override
+ public String toString() {
+ return masterLog.toString();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/EncodedSpeculationReason.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+package jdk.vm.ci.meta;
+
+import java.util.Arrays;
+import java.util.function.Supplier;
+
+import jdk.vm.ci.meta.SpeculationLog.SpeculationReason;
+
+/**
+ * An implementation of {@link SpeculationReason} based on encoded values.
+ */
+public class EncodedSpeculationReason implements SpeculationReason {
+ final int groupId;
+ final String groupName;
+ final Object[] context;
+ private SpeculationLog.SpeculationReasonEncoding encoding;
+
+ public EncodedSpeculationReason(int groupId, String groupName, Object[] context) {
+ this.groupId = groupId;
+ this.groupName = groupName;
+ this.context = context;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof EncodedSpeculationReason) {
+ if (obj instanceof EncodedSpeculationReason) {
+ EncodedSpeculationReason that = (EncodedSpeculationReason) obj;
+ return this.groupId == that.groupId && Arrays.equals(this.context, that.context);
+ }
+ return false;
+ }
+ return false;
+ }
+
+ @Override
+ public SpeculationLog.SpeculationReasonEncoding encode(Supplier<SpeculationLog.SpeculationReasonEncoding> encodingSupplier) {
+ if (encoding == null) {
+ encoding = encodingSupplier.get();
+ encoding.addInt(groupId);
+ for (Object o : context) {
+ if (o == null) {
+ encoding.addInt(0);
+ } else {
+ addNonNullObject(encoding, o);
+ }
+ }
+ }
+ return encoding;
+ }
+
+ static void addNonNullObject(SpeculationLog.SpeculationReasonEncoding encoding, Object o) {
+ Class<? extends Object> c = o.getClass();
+ if (c == String.class) {
+ encoding.addString((String) o);
+ } else if (c == Byte.class) {
+ encoding.addByte((Byte) o);
+ } else if (c == Short.class) {
+ encoding.addShort((Short) o);
+ } else if (c == Character.class) {
+ encoding.addShort((Character) o);
+ } else if (c == Integer.class) {
+ encoding.addInt((Integer) o);
+ } else if (c == Long.class) {
+ encoding.addLong((Long) o);
+ } else if (c == Float.class) {
+ encoding.addInt(Float.floatToRawIntBits((Float) o));
+ } else if (c == Double.class) {
+ encoding.addLong(Double.doubleToRawLongBits((Double) o));
+ } else if (o instanceof Enum) {
+ encoding.addInt(((Enum<?>) o).ordinal());
+ } else if (o instanceof ResolvedJavaMethod) {
+ encoding.addMethod((ResolvedJavaMethod) o);
+ } else if (o instanceof ResolvedJavaType) {
+ encoding.addType((ResolvedJavaType) o);
+ } else if (o instanceof ResolvedJavaField) {
+ encoding.addField((ResolvedJavaField) o);
+ } else {
+ throw new IllegalArgumentException("Unsupported type for encoding: " + c.getName());
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return groupId + Arrays.hashCode(this.context);
+ }
+
+ @Override
+ public String toString() {
+ return String.format("%s@%d%s", groupName, groupId, Arrays.toString(context));
+ }
+}
--- a/src/jdk.management/share/classes/com/sun/management/ThreadMXBean.java Thu Oct 24 17:14:42 2019 -0400
+++ b/src/jdk.management/share/classes/com/sun/management/ThreadMXBean.java Mon Oct 28 11:21:43 2019 -0400
@@ -122,9 +122,6 @@
* {@link #getThreadAllocatedBytes getThreadAllocatedBytes}(Thread.currentThread().getId());
* </pre></blockquote>
*
- * @implSpec The default implementation throws
- * {@code UnsupportedOperationException}.
- *
* @return an approximation of the total memory allocated, in bytes, in
* heap memory for the current thread
* if thread memory allocation measurement is enabled;
@@ -141,7 +138,7 @@
* @since 14
*/
public default long getCurrentThreadAllocatedBytes() {
- throw new UnsupportedOperationException();
+ return getThreadAllocatedBytes(Thread.currentThread().getId());
}
/**
--- a/test/hotspot/gtest/oops/test_markOop.cpp Thu Oct 24 17:14:42 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/semaphore.inline.hpp"
-#include "threadHelper.inline.hpp"
-#include "unittest.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ostream.hpp"
-
-// The test doesn't work for PRODUCT because it needs WizardMode
-#ifndef PRODUCT
-static bool test_pattern(stringStream* st, const char* pattern) {
- return (strstr(st->as_string(), pattern) != NULL);
-}
-
-static void assert_test_pattern(Handle object, const char* pattern) {
- stringStream st;
- object->print_on(&st);
- ASSERT_TRUE(test_pattern(&st, pattern)) << pattern << " not in " << st.as_string();
-}
-
-static void assert_not_test_pattern(Handle object, const char* pattern) {
- stringStream st;
- object->print_on(&st);
- ASSERT_FALSE(test_pattern(&st, pattern)) << pattern << " found in " << st.as_string();
-}
-
-class LockerThread : public JavaTestThread {
- oop _obj;
- public:
- LockerThread(Semaphore* post, oop obj) : JavaTestThread(post), _obj(obj) {}
- virtual ~LockerThread() {}
-
- void main_run() {
- Thread* THREAD = Thread::current();
- HandleMark hm(THREAD);
- Handle h_obj(THREAD, _obj);
- ResourceMark rm(THREAD);
-
- // Wait gets the lock inflated.
- // The object will stay locked for the context of 'ol' so the lock will
- // still be inflated after the notify_all() call. Deflation can't happen
- // while an ObjectMonitor is "busy" and being locked is the most "busy"
- // state we have...
- ObjectLocker ol(h_obj, THREAD);
- ol.notify_all(THREAD);
- assert_test_pattern(h_obj, "monitor");
- }
-};
-
-
-TEST_VM(markWord, printing) {
- JavaThread* THREAD = JavaThread::current();
- ThreadInVMfromNative invm(THREAD);
- ResourceMark rm(THREAD);
-
- oop obj = SystemDictionary::Byte_klass()->allocate_instance(THREAD);
-
- FlagSetting fs(WizardMode, true);
- FlagSetting bf(UseBiasedLocking, true);
-
- HandleMark hm(THREAD);
- Handle h_obj(THREAD, obj);
-
- // Biased locking is initially enabled for this java.lang.Byte object.
- assert_test_pattern(h_obj, "is_biased");
-
- // Lock using biased locking.
- BasicObjectLock lock;
- lock.set_obj(obj);
- markWord prototype_header = obj->klass()->prototype_header();
- markWord mark = obj->mark();
- markWord biased_mark = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
- obj->set_mark(biased_mark);
- // Look for the biased_locker in markWord, not prototype_header.
-#ifdef _LP64
- assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x0000000000000000");
-#else
- assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x00000000");
-#endif
-
- // Same thread tries to lock it again.
- {
- ObjectLocker ol(h_obj, THREAD);
- assert_test_pattern(h_obj, "locked");
- }
-
- // This is no longer biased, because ObjectLocker revokes the bias.
- assert_test_pattern(h_obj, "is_neutral no_hash");
-
- // Wait gets the lock inflated.
- {
- ObjectLocker ol(h_obj, THREAD);
-
- Semaphore done(0);
- LockerThread* st;
- st = new LockerThread(&done, h_obj());
- st->doit();
-
- ol.wait(THREAD);
- assert_test_pattern(h_obj, "monitor");
- done.wait_with_safepoint_check(THREAD); // wait till the thread is done.
- }
-
- // Make the object older. Not all GCs use this field.
- Universe::heap()->collect(GCCause::_java_lang_system_gc);
- if (UseParallelGC) {
- assert_test_pattern(h_obj, "is_neutral no_hash age 1");
- }
-
- // Hash the object then print it.
- intx hash = h_obj->identity_hash();
- assert_test_pattern(h_obj, "is_neutral hash=0x");
-}
-#endif // PRODUCT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/oops/test_markWord.cpp Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/semaphore.inline.hpp"
+#include "threadHelper.inline.hpp"
+#include "unittest.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+// The test doesn't work for PRODUCT because it needs WizardMode
+#ifndef PRODUCT
+static bool test_pattern(stringStream* st, const char* pattern) {
+ return (strstr(st->as_string(), pattern) != NULL);
+}
+
+static void assert_test_pattern(Handle object, const char* pattern) {
+ stringStream st;
+ object->print_on(&st);
+ ASSERT_TRUE(test_pattern(&st, pattern)) << pattern << " not in " << st.as_string();
+}
+
+static void assert_not_test_pattern(Handle object, const char* pattern) {
+ stringStream st;
+ object->print_on(&st);
+ ASSERT_FALSE(test_pattern(&st, pattern)) << pattern << " found in " << st.as_string();
+}
+
+class LockerThread : public JavaTestThread {
+ oop _obj;
+ public:
+ LockerThread(Semaphore* post, oop obj) : JavaTestThread(post), _obj(obj) {}
+ virtual ~LockerThread() {}
+
+ void main_run() {
+ Thread* THREAD = Thread::current();
+ HandleMark hm(THREAD);
+ Handle h_obj(THREAD, _obj);
+ ResourceMark rm(THREAD);
+
+ // Wait gets the lock inflated.
+ // The object will stay locked for the context of 'ol' so the lock will
+ // still be inflated after the notify_all() call. Deflation can't happen
+ // while an ObjectMonitor is "busy" and being locked is the most "busy"
+ // state we have...
+ ObjectLocker ol(h_obj, THREAD);
+ ol.notify_all(THREAD);
+ assert_test_pattern(h_obj, "monitor");
+ }
+};
+
+
+TEST_VM(markWord, printing) {
+ JavaThread* THREAD = JavaThread::current();
+ ThreadInVMfromNative invm(THREAD);
+ ResourceMark rm(THREAD);
+
+ if (!UseBiasedLocking || !BiasedLocking::enabled()) {
+ // Can't test this with biased locking disabled.
+ return;
+ }
+
+ oop obj = SystemDictionary::Byte_klass()->allocate_instance(THREAD);
+
+ FlagSetting fs(WizardMode, true);
+
+ HandleMark hm(THREAD);
+ Handle h_obj(THREAD, obj);
+
+ // Biased locking is initially enabled for this java.lang.Byte object.
+ assert_test_pattern(h_obj, "is_biased");
+
+ // Lock using biased locking.
+ BasicObjectLock lock;
+ lock.set_obj(obj);
+ markWord prototype_header = obj->klass()->prototype_header();
+ markWord mark = obj->mark();
+ markWord biased_mark = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
+ obj->set_mark(biased_mark);
+ // Look for the biased_locker in markWord, not prototype_header.
+#ifdef _LP64
+ assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x0000000000000000");
+#else
+ assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x00000000");
+#endif
+
+ // Same thread tries to lock it again.
+ {
+ ObjectLocker ol(h_obj, THREAD);
+ assert_test_pattern(h_obj, "locked");
+ }
+
+ // This is no longer biased, because ObjectLocker revokes the bias.
+ assert_test_pattern(h_obj, "is_neutral no_hash");
+
+ // Wait gets the lock inflated.
+ {
+ ObjectLocker ol(h_obj, THREAD);
+
+ Semaphore done(0);
+ LockerThread* st;
+ st = new LockerThread(&done, h_obj());
+ st->doit();
+
+ ol.wait(THREAD);
+ assert_test_pattern(h_obj, "monitor");
+ done.wait_with_safepoint_check(THREAD); // wait till the thread is done.
+ }
+
+ // Make the object older. Not all GCs use this field.
+ Universe::heap()->collect(GCCause::_java_lang_system_gc);
+ if (UseParallelGC) {
+ assert_test_pattern(h_obj, "is_neutral no_hash age 1");
+ }
+
+ // Hash the object then print it.
+ intx hash = h_obj->identity_hash();
+ assert_test_pattern(h_obj, "is_neutral hash=0x");
+}
+#endif // PRODUCT
--- a/test/hotspot/jtreg/ProblemList.txt Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/ProblemList.txt Mon Oct 28 11:21:43 2019 -0400
@@ -194,7 +194,6 @@
vmTestbase/vm/mlvm/indy/func/jvmti/redefineClassInBootstrap/TestDescription.java 8013267 generic-all
vmTestbase/vm/mlvm/meth/func/java/throwException/Test.java 8058176 generic-all
vmTestbase/vm/mlvm/meth/func/jdi/breakpointOtherStratum/Test.java 8208257,8058176 generic-all
-vmTestbase/vm/mlvm/meth/stress/compiler/deoptimize/Test.java 8058176 generic-all
vmTestbase/vm/mlvm/meth/stress/compiler/i2c_c2i/Test.java 8058176 generic-all
vmTestbase/vm/mlvm/meth/stress/compiler/sequences/Test.java 8058176 generic-all
vmTestbase/vm/mlvm/meth/stress/gc/callSequencesDuringGC/Test.java 8058176 generic-all
--- a/test/hotspot/jtreg/compiler/dependencies/MonomorphicObjectCall/java.base/java/lang/Object.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/compiler/dependencies/MonomorphicObjectCall/java.base/java/lang/Object.java Mon Oct 28 11:21:43 2019 -0400
@@ -34,11 +34,6 @@
@HotSpotIntrinsicCandidate
public Object() {}
- private static native void registerNatives();
- static {
- registerNatives();
- }
-
@HotSpotIntrinsicCandidate
public final native Class<?> getClass();
--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/TestHotSpotSpeculationLog.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/TestHotSpotSpeculationLog.java Mon Oct 28 11:21:43 2019 -0400
@@ -78,7 +78,8 @@
public synchronized void testFailedSpeculations() {
HotSpotSpeculationLog log = new HotSpotSpeculationLog();
DummyReason reason1 = new DummyReason("dummy1");
- DummyReason reason2 = new DummyReason("dummy2");
+ String longName = new String(new char[2000]).replace('\0', 'X');
+ DummyReason reason2 = new DummyReason(longName);
Assert.assertTrue(log.maySpeculate(reason1));
Assert.assertTrue(log.maySpeculate(reason2));
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/LoopRotateBadNodeBudget.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231565
+ * @summary Node estimate for loop rotate is not correct/sufficient:
+ * assert(delta <= 2 * required) failed: Bad node estimate ...
+ *
+ * @requires !vm.graal.enabled
+ *
+ * @run main/othervm -XX:PartialPeelNewPhiDelta=5 LoopRotateBadNodeBudget
+ * @run main/othervm -Xbatch -XX:PartialPeelNewPhiDelta=5 LoopRotateBadNodeBudget
+ *
+ * @run main/othervm LoopRotateBadNodeBudget
+ * @run main/othervm -Xbatch LoopRotateBadNodeBudget
+ *
+ * NOTE: This test case only seldom manifests the problem on fast machines.
+ */
+
+public class LoopRotateBadNodeBudget {
+
+ int h;
+ float j(int a, int b) {
+ double d = 0.19881;
+ int c, e[] = new int[9];
+ c = 1;
+ while (++c < 12)
+ switch ((c % 7 * 5) + 122) {
+ case 156:
+ case 46128:
+ case 135:
+ case 148:
+ case 127:
+ break;
+ default:
+ }
+ while ((d += 2) < 62)
+ ;
+ long k = l(e);
+ return k;
+ }
+ long l(int[] a) {
+ long m = 0;
+ for (int i = 0; i < a.length; i++)
+ m = a[i];
+ return m;
+ }
+ void f(String[] g) {
+ int i = 2;
+ for (; i < 20000; ++i)
+ j(3, h);
+ }
+ public static void main(String[] o) {
+ try {
+ LoopRotateBadNodeBudget n = new LoopRotateBadNodeBudget();
+ n.f(o);
+ } catch (Exception ex) {
+ }
+ }
+}
--- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java Mon Oct 28 11:21:43 2019 -0400
@@ -128,7 +128,6 @@
new LogMessageWithLevel("CLDG Roots", Level.TRACE),
new LogMessageWithLevel("JVMTI Roots", Level.TRACE),
new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
- new LogMessageWithLevel("Wait For Strong Roots", Level.TRACE),
// Redirty Cards
new LogMessageWithLevel("Redirty Cards", Level.DEBUG),
new LogMessageWithLevel("Parallel Redirty", Level.TRACE),
--- a/test/hotspot/jtreg/runtime/8024804/RegisterNatives.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/runtime/8024804/RegisterNatives.java Mon Oct 28 11:21:43 2019 -0400
@@ -23,15 +23,27 @@
/*
* @test
- * @bug 8024804
- * @bug 8028741
+ * @bug 8024804 8028741 8232613
* @summary interface method resolution should skip finding j.l.Object's registerNatives() and succeed in selecting class B's registerNatives()
* @run main RegisterNatives
*/
public class RegisterNatives {
- interface I { void registerNatives(); }
+ interface I {
+ void registerNatives();
+ }
+
interface J extends I {}
- static class B implements J { public void registerNatives() { System.out.println("B"); } }
+
+ interface K {
+ default public void registerNatives() { System.out.println("K"); }
+ }
+
+ static class B implements J {
+ public void registerNatives() { System.out.println("B"); }
+ }
+
+ static class C implements K {}
+
public static void main(String... args) {
System.out.println("Regression test for JDK-8024804, crash when InterfaceMethodref resolves to Object.registerNatives\n");
J val = new B();
@@ -42,6 +54,14 @@
e.printStackTrace();
throw e;
}
+ C cval = new C();
+ try {
+ cval.registerNatives();
+ } catch (IllegalAccessError e) {
+ System.out.println("TEST FAILS - a default method named registerNatives should no longer be masked by removed Object.registerNatives\n");
+ e.printStackTrace();
+ throw e;
+ }
System.out.println("TEST PASSES - no IAE resulted\n");
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/RedefineClasses/RedefineObject.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8232613
+ * @summary Ensure Object natives stay registered after redefinition
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.base/jdk.internal.org.objectweb.asm
+ * java.compiler
+ * java.instrument
+ * jdk.jartool/sun.tools.jar
+ * @run main RedefineObject buildagent
+ * @run main/othervm -javaagent:redefineagent.jar RedefineObject
+ */
+
+import static jdk.test.lib.Asserts.assertTrue;
+import java.io.FileNotFoundException;
+import java.io.PrintWriter;
+import java.lang.RuntimeException;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.security.ProtectionDomain;
+import java.util.Arrays;
+
+import jdk.internal.org.objectweb.asm.ClassReader;
+import jdk.internal.org.objectweb.asm.ClassVisitor;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+
+import static jdk.internal.org.objectweb.asm.Opcodes.ASM6;
+import static jdk.internal.org.objectweb.asm.Opcodes.V1_8;
+
+public class RedefineObject {
+
+ static Instrumentation inst;
+
+ public static void premain(String agentArgs, Instrumentation inst) {
+ RedefineObject.inst = inst;
+ }
+
+ static class Transformer implements ClassFileTransformer {
+
+ public byte[] asm(ClassLoader loader, String className,
+ Class<?> classBeingRedefined,
+ ProtectionDomain protectionDomain, byte[] classfileBuffer)
+ throws IllegalClassFormatException {
+ ClassWriter cw = new ClassWriter(0);
+ // Force an older ASM to force a bytecode update
+ ClassVisitor cv = new DummyClassVisitor(ASM6, cw) { };
+ ClassReader cr = new ClassReader(classfileBuffer);
+ cr.accept(cv, 0);
+ byte[] bytes = cw.toByteArray();
+ return bytes;
+ }
+
+ public class DummyClassVisitor extends ClassVisitor {
+
+ public DummyClassVisitor(int api, ClassVisitor cv) {
+ super(api, cv);
+ }
+
+ public void visit(
+ final int version,
+ final int access,
+ final String name,
+ final String signature,
+ final String superName,
+ final String[] interfaces) {
+ // Artificially lower to JDK 8 version to force a redefine
+ cv.visit(V1_8, access, name, signature, superName, interfaces);
+ }
+ }
+
+ @Override public byte[] transform(ClassLoader loader, String className,
+ Class<?> classBeingRedefined,
+ ProtectionDomain protectionDomain, byte[] classfileBuffer)
+ throws IllegalClassFormatException {
+
+ if (className.contains("java/lang/Object")) {
+ try {
+                    // Re-emit the class with a lowered class file version (see DummyClassVisitor) to force a redefine
+ return asm(loader, className, classBeingRedefined, protectionDomain, classfileBuffer);
+ } catch (Throwable e) {
+ // The retransform native code that called this method does not propagate
+ // exceptions. Instead of getting an uninformative generic error, catch
+ // problems here and print it, then exit.
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+ return null;
+ }
+ }
+
+ private static void buildAgent() {
+ try {
+ ClassFileInstaller.main("RedefineObject");
+ } catch (Exception e) {
+ throw new RuntimeException("Could not write agent classfile", e);
+ }
+
+ try {
+ PrintWriter pw = new PrintWriter("MANIFEST.MF");
+ pw.println("Premain-Class: RedefineObject");
+ pw.println("Agent-Class: RedefineObject");
+ pw.println("Can-Retransform-Classes: true");
+ pw.close();
+ } catch (FileNotFoundException e) {
+ throw new RuntimeException("Could not write manifest file for the agent", e);
+ }
+
+ sun.tools.jar.Main jarTool = new sun.tools.jar.Main(System.out, System.err, "jar");
+ if (!jarTool.run(new String[] { "-cmf", "MANIFEST.MF", "redefineagent.jar", "RedefineObject.class" })) {
+ throw new RuntimeException("Could not write the agent jar file");
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ int objHash = System.identityHashCode(Object.class);
+ System.out.println("Object hashCode: " + objHash);
+ if (args.length == 1 && args[0].equals("buildagent")) {
+ buildAgent();
+ return;
+ }
+
+ if (inst == null) {
+ throw new RuntimeException("Instrumentation object was null");
+ }
+
+ try {
+ inst.addTransformer(new RedefineObject.Transformer(), true);
+ inst.retransformClasses(Object.class);
+ } catch (UnmodifiableClassException e) {
+ throw new RuntimeException(e);
+ }
+
+ // Exercise native methods on Object after transform
+ Object b = new Object();
+ b.hashCode();
+
+ C c = new C();
+ assertTrue(c.hashCode() != c.clone().hashCode() || c != c.clone());
+ assertTrue(c.clone() instanceof C);
+ c = (C)c.clone(); // native method on new Object
+ }
+
+ private static class C implements Cloneable {
+ @Override
+ protected Object clone() throws CloneNotSupportedException {
+ return super.clone();
+ }
+ }
+}
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/Allocate/alloc001/TestDescription.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/Allocate/alloc001/TestDescription.java Mon Oct 28 11:21:43 2019 -0400
@@ -42,8 +42,6 @@
* @library /vmTestbase
* /test/lib
* @requires os.family != "aix"
- * @comment Test is incompatible with ZGC, due to ZGC's address space requirements.
- * @requires vm.gc != "Z"
* @run driver jdk.test.lib.FileInstaller . .
* @build nsk.jvmti.Allocate.alloc001
* @run shell alloc001.sh
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/bcinstr/BI04/bi04t002/newclass02/java.base/java/lang/Object.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/bcinstr/BI04/bi04t002/newclass02/java.base/java/lang/Object.java Mon Oct 28 11:21:43 2019 -0400
@@ -37,11 +37,6 @@
*/
public class Object {
- private static native void registerNatives();
- static {
- registerNatives();
- }
-
/**
* Returns the runtime class of an object. That <tt>Class</tt>
* object is the object that is locked by <tt>static synchronized</tt>
--- a/test/hotspot/jtreg/vmTestbase/vm/mlvm/meth/stress/compiler/deoptimize/Test.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/hotspot/jtreg/vmTestbase/vm/mlvm/meth/stress/compiler/deoptimize/Test.java Mon Oct 28 11:21:43 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test
+ * @key stress
*
* @summary converted from VM Testbase vm/mlvm/meth/stress/compiler/deoptimize.
* VM Testbase keywords: [feature_mlvm, nonconcurrent, quarantine]
@@ -42,6 +43,8 @@
* @build vm.mlvm.meth.stress.compiler.deoptimize.Test
* @run driver vm.mlvm.share.IndifiedClassesBuilder
*
+ * @requires vm.debug != true
+ *
* @run main/othervm
* -XX:ReservedCodeCacheSize=100m
* vm.mlvm.meth.stress.compiler.deoptimize.Test
@@ -49,6 +52,29 @@
* -threadsExtra 2
*/
+
+/*
+ * @test
+ * @key stress
+ *
+ * @library /vmTestbase
+ * /test/lib
+ * @run driver jdk.test.lib.FileInstaller . .
+ *
+ * @comment build test class and indify classes
+ * @build vm.mlvm.meth.stress.compiler.deoptimize.Test
+ * @run driver vm.mlvm.share.IndifiedClassesBuilder
+ *
+ * @requires vm.debug == true
+ *
+ * @run main/othervm
+ * -XX:ReservedCodeCacheSize=100m
+ * vm.mlvm.meth.stress.compiler.deoptimize.Test
+ * -threadsPerCpu 2
+ * -threadsExtra 2
+ */
+
+
package vm.mlvm.meth.stress.compiler.deoptimize;
import java.lang.invoke.MethodHandle;
--- a/test/jdk/ProblemList-Xcomp.txt Thu Oct 24 17:14:42 2019 -0400
+++ b/test/jdk/ProblemList-Xcomp.txt Mon Oct 28 11:21:43 2019 -0400
@@ -29,3 +29,4 @@
java/lang/invoke/MethodHandles/CatchExceptionTest.java 8146623 generic-all
java/util/stream/test/org/openjdk/tests/java/util/stream/StreamLinkTest.java 8216317 solaris-all
+java/math/BigInteger/largeMemory/SymmetricRangeTests.java 8232840 generic-all
--- a/test/jdk/ProblemList.txt Thu Oct 24 17:14:42 2019 -0400
+++ b/test/jdk/ProblemList.txt Mon Oct 28 11:21:43 2019 -0400
@@ -890,6 +890,4 @@
# jdk_internal
-jdk/internal/platform/docker/TestDockerMemoryMetrics.java 8227317 linux-x64
-
############################################################################
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/nio/channels/DatagramChannel/Unref.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8212132
+ * @summary Test that DatagramChannel does not leak file descriptors
+ * @requires os.family != "windows"
+ * @modules jdk.management
+ * @library /test/lib
+ * @run main/othervm Unref
+ */
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.InetSocketAddress;
+import java.net.StandardProtocolFamily;
+import java.nio.channels.DatagramChannel;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+
+import com.sun.management.UnixOperatingSystemMXBean;
+
+import jtreg.SkippedException;
+import jdk.test.lib.net.IPSupport;
+
+public class Unref {
+
+ interface DatagramChannelSupplier {
+ DatagramChannel get() throws IOException;
+ }
+
+ public static void main(String[] args) throws Exception {
+ if (unixOperatingSystemMXBean() == null)
+ throw new SkippedException("This test requires UnixOperatingSystemMXBean");
+
+ test(DatagramChannel::open);
+ if (IPSupport.hasIPv4())
+ test(() -> DatagramChannel.open(StandardProtocolFamily.INET));
+ if (IPSupport.hasIPv6())
+ test(() -> DatagramChannel.open(StandardProtocolFamily.INET6));
+ }
+
+ static void test(DatagramChannelSupplier supplier) throws Exception {
+ openAndClose(supplier); // warm-up
+
+ try (Selector sel = Selector.open()) {
+ long count = fileDescriptorCount();
+
+ // open+close
+ openAndClose(supplier);
+ assertEquals(fileDescriptorCount(), count);
+
+ // open+unref, file descriptor should be closed by cleaner
+ openAndUnref(supplier);
+ assertEquals(waitForFileDescriptorCount(count), count);
+
+ // open+register+close+flush
+ openRegisterAndClose(supplier, sel);
+ assertEquals(fileDescriptorCount(), count);
+
+ // open+register+flush, file descriptor should be closed by cleaner
+ openRegisterAndUnref(supplier, sel);
+ assertEquals(waitForFileDescriptorCount(count), count);
+ }
+ }
+
+ /**
+     * Creates a DatagramChannel and closes it.
+ */
+ static void openAndClose(DatagramChannelSupplier supplier) throws IOException {
+ System.out.println("openAndClose ...");
+ DatagramChannel dc = supplier.get();
+ dc.close();
+ }
+
+ /**
+     * Creates a DatagramChannel and exits without closing the channel.
+ */
+ static void openAndUnref(DatagramChannelSupplier supplier) throws IOException {
+ System.out.println("openAndUnref ...");
+ DatagramChannel dc = supplier.get();
+ }
+
+ /**
+     * Creates a DatagramChannel, registers it with a Selector, closes the
+     * channel while registered, and finally flushes the channel from the Selector.
+ */
+ static void openRegisterAndClose(DatagramChannelSupplier supplier, Selector sel)
+ throws IOException
+ {
+ System.out.println("openRegisterAndClose ...");
+ try (DatagramChannel dc = supplier.get()) {
+ dc.bind(new InetSocketAddress(0));
+ dc.configureBlocking(false);
+ dc.register(sel, SelectionKey.OP_READ);
+ sel.selectNow();
+ }
+
+ // flush, should close channel
+ sel.selectNow();
+ }
+
+ /**
+ * Creates a DatagramChannel, registers with a Selector, cancels the key
+ * and flushes the channel from the Selector. This method exits without
+ * closing the channel.
+ */
+ static void openRegisterAndUnref(DatagramChannelSupplier supplier, Selector sel)
+ throws IOException
+ {
+ System.out.println("openRegisterAndUnref ...");
+ DatagramChannel dc = supplier.get();
+ dc.bind(new InetSocketAddress(0));
+ dc.configureBlocking(false);
+ SelectionKey key = dc.register(sel, SelectionKey.OP_READ);
+ sel.selectNow();
+ key.cancel();
+ sel.selectNow();
+ }
+
+ /**
+ * If the file descriptor count is higher than the given count then invoke
+ * System.gc() and wait for the file descriptor count to drop.
+ */
+ static long waitForFileDescriptorCount(long target) throws InterruptedException {
+ long actual = fileDescriptorCount();
+ if (actual > target) {
+ System.gc();
+ while ((actual = fileDescriptorCount()) > target) {
+ Thread.sleep(10);
+ }
+ }
+ return actual;
+ }
+
+ static UnixOperatingSystemMXBean unixOperatingSystemMXBean() {
+ return ManagementFactory.getPlatformMXBean(UnixOperatingSystemMXBean.class);
+ }
+
+ static long fileDescriptorCount() {
+ return unixOperatingSystemMXBean().getOpenFileDescriptorCount();
+ }
+
+ static void assertEquals(long actual, long expected) {
+ if (actual != expected)
+ throw new RuntimeException("actual=" + actual + ", expected=" + expected);
+ }
+}
+
--- a/test/jdk/jdk/internal/platform/docker/TestDockerMemoryMetrics.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/jdk/jdk/internal/platform/docker/TestDockerMemoryMetrics.java Mon Oct 28 11:21:43 2019 -0400
@@ -25,6 +25,7 @@
import jdk.test.lib.containers.docker.Common;
import jdk.test.lib.containers.docker.DockerRunOptions;
import jdk.test.lib.containers.docker.DockerTestUtils;
+import jdk.test.lib.process.OutputAnalyzer;
/*
* @test
@@ -119,7 +120,19 @@
.addJavaOpts("-cp", "/test-classes/")
.addJavaOpts("--add-exports", "java.base/jdk.internal.platform=ALL-UNNAMED")
.addClassOptions("kernelmem", value);
- DockerTestUtils.dockerRunJava(opts).shouldHaveExitValue(0).shouldContain("TEST PASSED!!!");
+ OutputAnalyzer oa = DockerTestUtils.dockerRunJava(opts);
+
+ // Some container runtimes (e.g. runc, docker 18.09)
+ // have been built without kernel memory accounting. In
+ // that case, the runtime issues a message on stderr saying
+ // so. Skip the test in that case.
+ if (oa.getStderr().contains("kernel memory accounting disabled")) {
+ System.out.println("Kernel memory accounting disabled, " +
+ "skipping the test case");
+ return;
+ }
+
+ oa.shouldHaveExitValue(0).shouldContain("TEST PASSED!!!");
}
private static void testOomKillFlag(String value, boolean oomKillFlag) throws Exception {
--- a/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java Thu Oct 24 17:14:42 2019 -0400
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java Mon Oct 28 11:21:43 2019 -0400
@@ -98,7 +98,6 @@
"CLDGRoots",
"JVMTIRoots",
"CMRefRoots",
- "WaitForStrongRoots",
"MergeER",
"MergeHCC",
"MergeRS",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/jvm/TestClearStaleConstants.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.jvm;
+
+import java.time.Duration;
+import java.util.List;
+
+import jdk.jfr.consumer.RecordedClass;
+import jdk.jfr.consumer.RecordedClassLoader;
+import jdk.jfr.consumer.RecordedEvent;
+import jdk.jfr.internal.JVM;
+import jdk.jfr.Recording;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.jfr.EventNames;
+import jdk.test.lib.jfr.Events;
+import jdk.test.lib.jfr.TestClassLoader;
+
+/**
+ * @test
+ * @bug 8231081
+ * @key jfr
+ * @requires vm.hasJFR
+ * @modules jdk.jfr/jdk.jfr.internal
+ * @library /test/lib /test/jdk
+ * @run main/othervm -Xlog:class+unload -Xlog:gc -Xmx16m jdk.jfr.jvm.TestClearStaleConstants
+ */
+
+/**
+ * System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent is NOT set.
+ * If that flag is set, G1 will never unload classes on System.gc().
+ * Since the "jfr" key guarantees that no VM flags are set from the outside,
+ * a plain System.gc() call should be sufficient to provoke class unloading.
+ */
+public final class TestClearStaleConstants {
+ static class MyClass {
+ }
+ private final static String TEST_CLASS_NAME = "jdk.jfr.jvm.TestClearStaleConstants$MyClass";
+ private final static String EVENT_NAME = EventNames.ClassDefine;
+
+ // to prevent the compiler from optimizing away all unread writes
+ public static TestClassLoader firstClassLoader;
+ public static TestClassLoader secondClassLoader;
+
+ public static void main(String... args) throws Exception {
+ firstClassLoader = new TestClassLoader();
+ // define a class using a class loader under a recording
+ Class<?> clz = recordClassDefinition(firstClassLoader);
+ JVM jvm = JVM.getJVM();
+ // we will now tag the defined and loaded clz as being in use (no recordings are running here)
+ jvm.getClassIdNonIntrinsic(clz);
+ // null out for unload to occur
+ firstClassLoader = null;
+ clz = null;
+ // provoke unload
+ System.gc();
+ // try to define another class _with the same name_ using a different class loader
+ secondClassLoader = new TestClassLoader();
+ // this will throw an NPE for 8231081 because it reuses the same class name
+ // that symbol was marked as already serialized by the unload, but since no recordings were running
+ // it was not written to any chunk. This creates a reference to a non-existing symbol, leading to an NPE (no symbol at the expected location).
+ recordClassDefinition(secondClassLoader);
+ }
+
+ private static Class<?> recordClassDefinition(TestClassLoader classLoader) throws Exception {
+ try (Recording recording = new Recording()) {
+ recording.enable(EVENT_NAME);
+ recording.start();
+ Class<?> clz = classLoader.loadClass(TEST_CLASS_NAME);
+ recording.stop();
+ assertClassDefineEvent(recording);
+ return clz;
+ }
+ }
+
+ private static void assertClassDefineEvent(Recording recording) throws Exception {
+ boolean isAnyFound = false;
+ for (RecordedEvent event : Events.fromRecording(recording)) {
+ System.out.println(event);
+ RecordedClass definedClass = event.getValue("definedClass");
+ if (TEST_CLASS_NAME.equals(definedClass.getName())) {
+ RecordedClassLoader definingClassLoader = definedClass.getClassLoader();
+ String definingName = definingClassLoader.getType().getName();
+ String testName = TestClassLoader.class.getName();
+ String errorMsg = "Expected " + testName + ", got " + definingName;
+ Asserts.assertEquals(testName, definingName, errorMsg);
+ Asserts.assertFalse(isAnyFound, "Found more than 1 event");
+ isAnyFound = true;
+ }
+ }
+ Asserts.assertTrue(isAnyFound, "No events found");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/security/mscapi/ProviderClassOption.java Mon Oct 28 11:21:43 2019 -0400
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8231598
+ * @requires os.family == "windows"
+ * @library /test/lib
+ * @summary keytool does not export sun.security.mscapi
+ */
+
+import jdk.test.lib.SecurityTools;
+
+public class ProviderClassOption {
+ public static void main(String[] args) throws Throwable {
+ SecurityTools.keytool("-v -storetype Windows-ROOT -list"
+ + " -providerClass sun.security.mscapi.SunMSCAPI")
+ .shouldHaveExitValue(0);
+ }
+}