--- a/.hgtags Fri Feb 15 17:41:06 2019 -0500
+++ b/.hgtags Sun Feb 17 09:54:08 2019 -0500
@@ -542,3 +542,4 @@
6c377af36a5c4203f16aed8a5e4c2ecc08fcd8bd jdk-12+30
021917019cda1c0c5853255322274f37693a2431 jdk-13+7
b5f7bb57de2f797be34f6c75d45c3245ad37ab97 jdk-12+31
+a535ba736cabc6886acdff36de3a096c46e5ddc5 jdk-13+8
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -2167,6 +2167,14 @@
hlt(0);
}
+void MacroAssembler::warn(const char* msg) {
+ pusha();
+ mov(c_rarg0, (address)msg);
+ mov(lr, CAST_FROM_FN_PTR(address, warning));
+ blrt(lr, 1, 0, MacroAssembler::ret_type_void);
+ popa();
+}
+
void MacroAssembler::unimplemented(const char* what) {
const char* buf = NULL;
{
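For reference, the MacroAssembler::warn helper added above saves all registers (pusha), loads the message and the address of the VM's warning() routine, makes a one-argument C call through blrt, and restores the registers (popa). A hedged usage sketch, assuming a MacroAssembler* named masm in some stub- or interpreter-generation code (the call site and message are illustrative, not part of this change):

    // Emits code that prints a diagnostic at run time without disturbing live registers.
    void emit_slow_path_warning(MacroAssembler* masm) {
      masm->warn("falling back to slow path");   // routed to warning(const char*) via blrt
    }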
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1687,7 +1687,7 @@
// Helper for generating a dynamic type check.
- // Smashes rscratch1.
+ // Smashes rscratch1, rscratch2.
void generate_type_check(Register sub_klass,
Register super_check_offset,
Register super_klass,
@@ -1979,6 +1979,10 @@
const Register dst_pos = c_rarg3; // destination position
const Register length = c_rarg4;
+
+ // Registers used as temps
+ const Register dst_klass = c_rarg5;
+
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
@@ -2184,8 +2188,7 @@
arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
r18, L_failed);
- const Register rscratch2_dst_klass = rscratch2;
- __ load_klass(rscratch2_dst_klass, dst); // reload
+ __ load_klass(dst_klass, dst); // reload
// Marshal the base address arguments now, freeing registers.
__ lea(from, Address(src, src_pos, Address::lsl(LogBytesPerHeapOop)));
@@ -2195,24 +2198,25 @@
__ movw(count, length); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
assert_different_registers(from, to, count, sco_temp,
- rscratch2_dst_klass, scratch_src_klass);
+ dst_klass, scratch_src_klass);
// assert_clean_int(count, sco_temp);
// Generate the type check.
const int sco_offset = in_bytes(Klass::super_check_offset_offset());
- __ ldrw(sco_temp, Address(rscratch2_dst_klass, sco_offset));
- // assert_clean_int(sco_temp, r18);
- generate_type_check(scratch_src_klass, sco_temp, rscratch2_dst_klass, L_plain_copy);
+ __ ldrw(sco_temp, Address(dst_klass, sco_offset));
+
+ // Smashes rscratch1, rscratch2
+ generate_type_check(scratch_src_klass, sco_temp, dst_klass, L_plain_copy);
// Fetch destination element klass from the ObjArrayKlass header.
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
- __ ldr(rscratch2_dst_klass, Address(rscratch2_dst_klass, ek_offset));
- __ ldrw(sco_temp, Address(rscratch2_dst_klass, sco_offset));
+ __ ldr(dst_klass, Address(dst_klass, ek_offset));
+ __ ldrw(sco_temp, Address(dst_klass, sco_offset));
// the checkcast_copy loop needs two extra arguments:
assert(c_rarg3 == sco_temp, "#3 already in place");
// Set up arguments for checkcast_copy_entry.
- __ mov(c_rarg4, rscratch2_dst_klass); // dst.klass.element_klass
+ __ mov(c_rarg4, dst_klass); // dst.klass.element_klass
__ b(RuntimeAddress(checkcast_copy_entry));
}
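The register shuffle above exists because generate_type_check is now documented to smash rscratch2 as well as rscratch1, so the destination klass is parked in c_rarg5 (dst_klass) rather than rscratch2 for the duration of the check. A small sketch of the invariant, using the register names from this hunk (the assert line is illustrative, not part of the patch):

    const Register dst_klass = c_rarg5;                   // argument register, survives the type check
    assert_different_registers(dst_klass, rscratch1, rscratch2);
    // generate_type_check(scratch_src_klass, sco_temp, dst_klass, L_plain_copy) may clobber
    // rscratch1/rscratch2, but dst_klass remains valid afterwards for the element-klass load.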
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -3230,7 +3230,6 @@
// since the parameter_size includes it.
__ push(r19);
__ mov(r19, index);
- assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(index, r19);
__ pop(r19);
__ push(index); // push appendix (MethodType, CallSite, etc.)
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3645,7 +3645,6 @@
Label L_no_push;
__ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
__ mov(temp, index);
- assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(index, temp);
__ verify_oop(index);
__ push_ptr(index); // push appendix (MethodType, CallSite, etc.)
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -29,6 +29,7 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -512,7 +513,7 @@
__ bind(restart);
- // Get the index into the update buffer. DirtyCardQueue::_index is
+ // Get the index into the update buffer. G1DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
@@ -539,7 +540,7 @@
__ mflr(R0);
__ std(R0, _abi(lr), R1_SP);
__ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
__ pop_frame();
__ ld(R0, _abi(lr), R1_SP);
__ mtlr(R0);
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,6 +30,7 @@
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -587,7 +588,7 @@
__ bind(restart);
- // Get the index into the update buffer. DirtyCardQueue::_index is
+ // Get the index into the update buffer. G1DirtyCardQueue::_index is
// a size_t so z_ltg is appropriate here.
__ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
@@ -607,7 +608,7 @@
__ bind(refill);
save_volatile_registers(sasm);
__ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread),
Z_thread);
__ z_lgr(addr_card, idx);
restore_volatile_registers(sasm); // Restore addr_card.
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -315,7 +316,7 @@
int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
__ bind(restart);
- // Load the index into the update buffer. DirtyCardQueue::_index is
+ // Load the index into the update buffer. G1DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
@@ -333,7 +334,7 @@
__ bind(refill);
address handle_zero =
CAST_FROM_FN_PTR(address,
- &DirtyCardQueueSet::handle_zero_index_for_thread);
+ &G1DirtyCardQueueSet::handle_zero_index_for_thread);
// This should be rare enough that we can afford to save all the
// scratch registers that the calling context might be using.
__ mov(G1_scratch, L3);
@@ -673,7 +674,7 @@
__ bind(restart);
- // Get the index into the update buffer. DirtyCardQueue::_index is
+ // Get the index into the update buffer. G1DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
@@ -694,7 +695,7 @@
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
- DirtyCardQueueSet::handle_zero_index_for_thread),
+ G1DirtyCardQueueSet::handle_zero_index_for_thread),
G2_thread);
__ restore_live_registers(true);
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2992,7 +2992,6 @@
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
- assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(temp, index, /*tmp*/recv);
__ verify_oop(temp);
__ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3643,7 +3643,6 @@
// since the parameter_size includes it.
__ push(rbx);
__ mov(rbx, index);
- assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(index, rbx);
__ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
--- a/src/hotspot/share/c1/c1_Decorators.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/c1/c1_Decorators.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -34,5 +34,7 @@
// Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
// needs to be masked.
const DecoratorSet C1_MASK_BOOLEAN = DECORATOR_LAST << 2;
+// Use the C1_UNSAFE_ACCESS decorator to mark unsafe accesses.
+const DecoratorSet C1_UNSAFE_ACCESS = DECORATOR_LAST << 3;
#endif // SHARE_C1_C1_DECORATORS_HPP
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -2161,7 +2161,7 @@
off.load_item();
src.load_item();
- DecoratorSet decorators = IN_HEAP;
+ DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
if (x->is_volatile()) {
decorators |= MO_SEQ_CST;
@@ -2195,7 +2195,7 @@
set_no_result(x);
- DecoratorSet decorators = IN_HEAP;
+ DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
if (type == T_ARRAY || type == T_OBJECT) {
decorators |= ON_UNKNOWN_OOP_REF;
}
@@ -2211,7 +2211,7 @@
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
- DecoratorSet decorators = IN_HEAP | MO_SEQ_CST;
+ DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
if (type == T_ARRAY || type == T_OBJECT) {
decorators |= ON_UNKNOWN_OOP_REF;
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -669,11 +669,21 @@
_out->print_cr(INT64_FORMAT, *(int64_t*)&d);
break;
}
- case T_ARRAY: {
+ case T_ARRAY: // fall-through
+ case T_OBJECT: {
oop value = mirror->obj_field_acquire(fd->offset());
if (value == NULL) {
_out->print_cr("null");
- } else {
+ } else if (value->is_instance()) {
+ assert(fd->field_type() == T_OBJECT, "");
+ if (value->is_a(SystemDictionary::String_klass())) {
+ const char* ascii_value = java_lang_String::as_quoted_ascii(value);
+ _out->print("\"%s\"", (ascii_value != NULL) ? ascii_value : "");
+ } else {
+ const char* klass_name = value->klass()->name()->as_quoted_ascii();
+ _out->print_cr("%s", klass_name);
+ }
+ } else if (value->is_array()) {
typeArrayOop ta = (typeArrayOop)value;
_out->print("%d", ta->length());
if (value->is_objArray()) {
@@ -682,21 +692,6 @@
_out->print(" %s", klass_name);
}
_out->cr();
- }
- break;
- }
- case T_OBJECT: {
- oop value = mirror->obj_field_acquire(fd->offset());
- if (value == NULL) {
- _out->print_cr("null");
- } else if (value->is_instance()) {
- if (value->is_a(SystemDictionary::String_klass())) {
- const char* ascii_value = java_lang_String::as_quoted_ascii(value);
- _out->print("\"%s\"", (ascii_value != NULL) ? ascii_value : "");
- } else {
- const char* klass_name = value->klass()->name()->as_quoted_ascii();
- _out->print_cr("%s", klass_name);
- }
} else {
ShouldNotReachHere();
}
--- a/src/hotspot/share/ci/ciStreams.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/ci/ciStreams.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -334,15 +334,91 @@
ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
- // Use the MethodType stored in the CP cache to create a signature
+ // Use the signature stored in the CP cache to create a signature
// with correct types (in respect to class loaders).
- if (has_method_type()) {
- ciSymbol* sig_sym = env->get_symbol(cpool->symbol_at(get_method_signature_index(cpool)));
- ciKlass* pool_holder = env->get_klass(cpool->pool_holder());
- ciMethodType* method_type = get_method_type();
- ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type);
- (*declared_signature_result) = declared_signature;
+ //
+ // In classic Java (before Java 7) there is never the slightest
+ // difference between the signature at the call site and that of the
+ // method. Such a difference would have been a type error in the
+ // JVM.
+ //
+ // Now there are a few circumstances where the signature of a call
+ // site (which controls the outgoing stacked arguments) can differ
+ // from the signature of the method (which controls the receipt of
+ // those arguments at the method entry point).
+ //
+ // A. The signatures can differ if the callee is a static method and
+ // the caller thinks it is calling a non-static method (VH.get).
+ // This requires the method signature to have an explicit leading
+ // argument for the implicit 'this', not present at the call site.
+ //
+ // B. The call site can have less specific parameter types than the
+ // method, allowing loosely-typed code to handle strongly-typed
+ // methods. This happens with linkToStatic and related linker
+ // commands. Obviously the loosely-typed code has to ensure that
+ // the strongly typed method's invariants are respected, and this is
+ // done by issuing dynamic casts.
+ //
+ // C. The call site can have more specific parameter types than the
+ // method, allowing loosely-typed methods to handle strongly-typed
+ // requests.
+ //
+ // D. There are corresponding effects with return values, such as
+ // boolean method returning an int to an int-receiving call site,
+ // even though the method thought it returned just a boolean.
+ //
+ // E. The calling sequence at a particular call site may add an
+ // "appendix" argument not mentioned in the call site signature. It
+ // is expected by the method signature, though, and this adds to the
+ // method's arity, even after 'this' parameter effects (A) are
+ // discounted. Appendixes are used by invokehandle and
+ // invokedynamic instructions.
+ //
+ // F. A linker method (linkToStatic, etc.) can also take an extra
+ // argument, a MemberName which routes the call to a concrete
+ // strongly-typed method. In this case the linker method may also
+ // differ in any of the ways A-D. The eventual method will ignore
+ // the presence of the extra argument.
+ //
+ // None of these changes to calling sequences requires an argument
+ // to be moved or reformatted in any way. This works because all
+ // references look alike to the JVM, as do all primitives (except
+ // float/long/double). Another required property of the JVM is
+ // that, if a trailing argument is added or dropped, the placement
+ // of other arguments does not change. This allows cases E and F to
+ // work smoothly, again without any moving or reformatting,
+ // despite the arity change.
+ //
+ if (has_local_signature()) {
+ Symbol* local_signature = cpool->symbol_at(get_method_signature_index(cpool));
+ ciSymbol* sig_sym = env->get_symbol(local_signature);
+ ciKlass* pool_holder = env->get_klass(cpool->pool_holder());
+ ciSignature* call_site_sig = new (env->arena()) ciSignature(pool_holder, cpool, sig_sym);
+ // Examples of how the call site signature can differ from the method's own signature:
+ //
+ // meth = static jboolean java.lang.invoke.VarHandleGuards.guard_LII_Z(jobject, jobject, jint, jint, jobject)
+ // msig = (Ljava/lang/invoke/VarHandle;Ljava/lang/Object;IILjava/lang/invoke/VarHandle$AccessDescriptor;)Z
+ // call = (Ljava/util/concurrent/locks/AbstractQueuedSynchronizer;II)Z
+ //
+ // meth = static jobject java.lang.invoke.LambdaForm$MH/0x0000000800066840.linkToTargetMethod(jobject, jobject)
+ // msig = (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;
+ // call = (Ljava/lang/String;)Ljava/util/function/Predicate;
+ //
+ (*declared_signature_result) = call_site_sig;
+
} else {
+ // We can just use the method's own signature. It may differ from the call site, but not by much.
+ //
+ // Examples of how the call site signature can differ from the method's signature:
+ //
+ // meth = static final native jint java.lang.invoke.MethodHandle.linkToStatic(jobject, jobject, jint, jint, jobject)
+ // msig = (Ljava/lang/Object;Ljava/lang/Object;IILjava/lang/invoke/MemberName;)I
+ // call = (Ljava/lang/invoke/VarHandle;Ljava/lang/Object;IILjava/lang/invoke/MemberName;)Z
+ //
+ // meth = final native jint java.lang.invoke.MethodHandle.invokeBasic(jobject, jobject, jint, jint)
+ // msig = (Ljava/lang/Object;Ljava/lang/Object;II)I
+ // call = (Ljava/lang/invoke/VarHandle;Ljava/lang/Object;II)Z
+ //
(*declared_signature_result) = m->signature();
}
return m;
@@ -372,27 +448,14 @@
}
// ------------------------------------------------------------------
-// ciBytecodeStream::has_method_type
+// ciBytecodeStream::has_local_signature
//
-// Returns true if there is a MethodType argument stored in the
-// constant pool cache at the current bci.
-bool ciBytecodeStream::has_method_type() {
+// Returns true if the method stored in the constant
+// pool cache at the current bci has a local signature.
+bool ciBytecodeStream::has_local_signature() {
GUARDED_VM_ENTRY(
constantPoolHandle cpool(_method->get_Method()->constants());
- return ConstantPool::has_method_type_at_if_loaded(cpool, get_method_index());
- )
-}
-
-// ------------------------------------------------------------------
-// ciBytecodeStream::get_method_type
-//
-// Return the MethodType stored in the constant pool cache at
-// the current bci.
-ciMethodType* ciBytecodeStream::get_method_type() {
- GUARDED_VM_ENTRY(
- constantPoolHandle cpool(_method->get_Method()->constants());
- oop method_type_oop = ConstantPool::method_type_at_if_loaded(cpool, get_method_index());
- return CURRENT_ENV->get_object(method_type_oop)->as_method_type();
+ return ConstantPool::has_local_signature_at_if_loaded(cpool, get_method_index());
)
}
--- a/src/hotspot/share/ci/ciStreams.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/ci/ciStreams.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -245,8 +245,7 @@
ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
bool has_appendix();
ciObject* get_appendix();
- bool has_method_type();
- ciMethodType* get_method_type();
+ bool has_local_signature();
ciKlass* get_declared_method_holder();
int get_method_holder_index();
int get_method_signature_index(const constantPoolHandle& cpool);
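The long comment in ciStreams.cpp above enumerates the cases (A-F) in which a call site's signature can legitimately differ from the resolved method's own signature; get_method hands back the applicable one through its out-parameter. A hedged sketch of a caller, assuming a ciBytecodeStream* named iter (only get_method and has_local_signature come from the declarations above):

    bool will_link;
    ciSignature* declared_sig = NULL;
    ciMethod* target = iter->get_method(will_link, &declared_sig);
    // declared_sig describes the call site when the CP cache entry carries a local signature;
    // otherwise it is simply target->signature(), i.e. the method's own signature.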
--- a/src/hotspot/share/classfile/classFileParser.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/classfile/classFileParser.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -564,7 +564,7 @@
}
case JVM_CONSTANT_Dynamic: {
const int name_and_type_ref_index =
- cp->invoke_dynamic_name_and_type_ref_index_at(index);
+ cp->bootstrap_name_and_type_ref_index_at(index);
check_property(valid_cp_range(name_and_type_ref_index, length) &&
cp->tag_at(name_and_type_ref_index).is_name_and_type(),
@@ -579,7 +579,7 @@
}
case JVM_CONSTANT_InvokeDynamic: {
const int name_and_type_ref_index =
- cp->invoke_dynamic_name_and_type_ref_index_at(index);
+ cp->bootstrap_name_and_type_ref_index_at(index);
check_property(valid_cp_range(name_and_type_ref_index, length) &&
cp->tag_at(name_and_type_ref_index).is_name_and_type(),
@@ -5020,7 +5020,8 @@
return true;
}
-// Take pointer to a string. Skip over the longest part of the string that could
+// Take pointer to a UTF8 byte string (not NUL-terminated).
+// Skip over the longest part of the string that could
// be taken as a fieldname. Allow '/' if slash_ok is true.
// Return a pointer to just past the fieldname.
// Return NULL if no fieldname at all was found, or in the case of slash_ok
@@ -5098,7 +5099,8 @@
return (not_first_ch) ? p : NULL;
}
-// Take pointer to a string. Skip over the longest part of the string that could
+// Take pointer to a UTF8 byte string (not NUL-terminated).
+// Skip over the longest part of the string that could
// be taken as a field signature. Allow "void" if void_ok.
// Return a pointer to just past the signature.
// Return NULL if no legal signature is found.
@@ -5132,7 +5134,7 @@
else {
// Skip leading 'L' and ignore first appearance of ';'
signature++;
- char* c = strchr((char*) signature, ';');
+ const char* c = (const char*) memchr(signature, ';', length - 1);
// Format check signature
if (c != NULL) {
int newlen = c - (char*) signature;
@@ -5199,7 +5201,7 @@
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
- "Illegal class name \"%s\" in class file %s", bytes,
+ "Illegal class name \"%.*s\" in class file %s", length, bytes,
_class_name->as_C_string()
);
return;
@@ -5232,7 +5234,7 @@
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
- "Illegal field name \"%s\" in class %s", bytes,
+ "Illegal field name \"%.*s\" in class %s", length, bytes,
_class_name->as_C_string()
);
return;
@@ -5269,7 +5271,7 @@
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
- "Illegal method name \"%s\" in class %s", bytes,
+ "Illegal method name \"%.*s\" in class %s", length, bytes,
_class_name->as_C_string()
);
return;
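The format-string changes in this file switch from "%s" to "%.*s" because the offending name comes straight from class-file UTF8 bytes, which are not NUL-terminated; the extra length argument bounds how many bytes are printed. A minimal sketch of the idiom with made-up values:

    const char* bytes = "java/lang/Stringjunk";             // hypothetical buffer, no NUL after the name
    int length = 16;                                        // number of valid bytes
    printf("Illegal class name \"%.*s\"\n", length, bytes); // prints exactly 16 bytes: java/lang/String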
--- a/src/hotspot/share/classfile/loaderConstraints.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,10 @@
if (p->hash() == hash) {
if (p->name() == name) {
for (int i = p->num_loaders() - 1; i >= 0; i--) {
- if (p->loader_data(i) == loader_data) {
+ if (p->loader_data(i) == loader_data &&
+ // skip unloaded klasses
+ (p->klass() == NULL ||
+ p->klass()->is_loader_alive())) {
return pp;
}
}
--- a/src/hotspot/share/classfile/stringTable.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/classfile/stringTable.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -334,6 +334,10 @@
if (StringTable::_alt_hash) {
hash = hash_string(name, len, true);
}
+ found_string = StringTable::the_table()->do_lookup(name, len, hash);
+ if (found_string != NULL) {
+ return found_string;
+ }
return StringTable::the_table()->do_intern(string_or_null_h, name, len,
hash, CHECK_NULL);
}
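The lines added above give StringTable::intern a lookup-first fast path: probe the table and return an existing entry before paying for do_intern. A hedged restatement of the pattern using the names from this hunk (the surrounding function is abbreviated):

    found_string = StringTable::the_table()->do_lookup(name, len, hash);
    if (found_string != NULL) {
      return found_string;    // already interned; no allocation or insertion needed
    }
    // Otherwise fall through to do_intern, the slow path that inserts the string.
    return StringTable::the_table()->do_intern(string_or_null_h, name, len, hash, CHECK_NULL);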
--- a/src/hotspot/share/classfile/systemDictionary.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -2114,7 +2114,7 @@
ss.print(" wants to load %s %s.",
k->external_kind(), k->external_name());
Klass *existing_klass = constraints()->find_constrained_klass(name, class_loader);
- if (existing_klass->class_loader() != class_loader()) {
+ if (existing_klass != NULL && existing_klass->class_loader() != class_loader()) {
ss.print(" A different %s with the same name was previously loaded by %s. (%s)",
existing_klass->external_kind(),
existing_klass->class_loader_data()->loader_name_and_id(),
@@ -2459,7 +2459,6 @@
Symbol* signature,
Klass* accessing_klass,
Handle *appendix_result,
- Handle *method_type_result,
TRAPS) {
methodHandle empty;
assert(THREAD->can_call_java() ,"");
@@ -2492,7 +2491,6 @@
vmSymbols::linkMethod_signature(),
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
- (*method_type_result) = method_type;
return unpack_method_and_appendix(mname, accessing_klass, appendix_box, appendix_result, THREAD);
}
@@ -2811,7 +2809,6 @@
Symbol* name,
Symbol* type,
Handle *appendix_result,
- Handle *method_type_result,
TRAPS) {
methodHandle empty;
Handle bsm, info;
@@ -2853,7 +2850,6 @@
vmSymbols::linkCallSite_signature(),
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
- (*method_type_result) = method_type;
return unpack_method_and_appendix(mname, caller, appendix_box, appendix_result, THREAD);
}
--- a/src/hotspot/share/classfile/systemDictionary.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -481,7 +481,6 @@
Symbol* signature,
Klass* accessing_klass,
Handle *appendix_result,
- Handle *method_type_result,
TRAPS);
// for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic)
// (does not ask Java, since this is a low-level intrinsic defined by the JVM)
@@ -544,7 +543,6 @@
Symbol* name,
Symbol* type,
Handle *appendix_result,
- Handle *method_type_result,
TRAPS);
// Record the error when the first attempt to resolve a reference from a constant
--- a/src/hotspot/share/code/dependencyContext.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/code/dependencyContext.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -107,7 +107,7 @@
_safepoint_counter(SafepointSynchronize::safepoint_counter()) {}
~DependencyContext() {
- assert(_safepoint_counter == SafepointSynchronize::safepoint_counter(), "safepoint happened");
+ assert(SafepointSynchronize::is_same_safepoint(_safepoint_counter), "must be the same safepoint");
}
#else
DependencyContext(nmethodBucket* volatile* bucket_addr, volatile uint64_t* last_cleanup_addr)
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/dirtyCardQueue.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1FreeIdSet.hpp"
-#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadSMR.hpp"
-
-// Closure used for updating remembered sets and recording references that
-// point into the collection set while the mutator is running.
-// Assumed to be only executed concurrently with the mutator. Yields via
-// SuspendibleThreadSet after every card.
-class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
-public:
- bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
- G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
-
- if (SuspendibleThreadSet::should_yield()) {
- // Caller will actually yield.
- return false;
- }
- // Otherwise, we finished successfully; return true.
- return true;
- }
-};
-
-DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
- // Dirty card queues are always active, so we create them with their
- // active field set to true.
- PtrQueue(qset, permanent, true /* active */)
-{ }
-
-DirtyCardQueue::~DirtyCardQueue() {
- if (!is_permanent()) {
- flush();
- }
-}
-
-DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
- PtrQueueSet(notify_when_complete),
- _shared_dirty_card_queue(this, true /* permanent */),
- _free_ids(NULL),
- _processed_buffers_mut(0),
- _processed_buffers_rs_thread(0),
- _cur_par_buffer_node(NULL)
-{
- _all_active = true;
-}
-
-DirtyCardQueueSet::~DirtyCardQueueSet() {
- delete _free_ids;
-}
-
-// Determines how many mutator threads can process the buffers in parallel.
-uint DirtyCardQueueSet::num_par_ids() {
- return (uint)os::initial_active_processor_count();
-}
-
-void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
- BufferNode::Allocator* allocator,
- Mutex* lock,
- bool init_free_ids) {
- PtrQueueSet::initialize(cbl_mon, allocator);
- _shared_dirty_card_queue.set_lock(lock);
- if (init_free_ids) {
- _free_ids = new G1FreeIdSet(0, num_par_ids());
- }
-}
-
-void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
- G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
-}
-
-bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
- BufferNode* node,
- bool consume,
- uint worker_i) {
- if (cl == NULL) return true;
- bool result = true;
- void** buf = BufferNode::make_buffer_from_node(node);
- size_t i = node->index();
- size_t limit = buffer_size();
- for ( ; i < limit; ++i) {
- jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
- assert(card_ptr != NULL, "invariant");
- if (!cl->do_card_ptr(card_ptr, worker_i)) {
- result = false; // Incomplete processing.
- break;
- }
- }
- if (consume) {
- assert(i <= buffer_size(), "invariant");
- node->set_index(i);
- }
- return result;
-}
-
-#ifndef ASSERT
-#define assert_fully_consumed(node, buffer_size)
-#else
-#define assert_fully_consumed(node, buffer_size) \
- do { \
- size_t _afc_index = (node)->index(); \
- size_t _afc_size = (buffer_size); \
- assert(_afc_index == _afc_size, \
- "Buffer was not fully consumed as claimed: index: " \
- SIZE_FORMAT ", size: " SIZE_FORMAT, \
- _afc_index, _afc_size); \
- } while (0)
-#endif // ASSERT
-
-bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
- guarantee(_free_ids != NULL, "must be");
-
- uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
- G1RefineCardConcurrentlyClosure cl;
- bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
- _free_ids->release_par_id(worker_i); // release the id
-
- if (result) {
- assert_fully_consumed(node, buffer_size());
- Atomic::inc(&_processed_buffers_mut);
- }
- return result;
-}
-
-bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
- G1RefineCardConcurrentlyClosure cl;
- return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
-}
-
-bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
- assert_at_safepoint();
- return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
-}
-
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
- uint worker_i,
- size_t stop_at,
- bool during_pause) {
- assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
- BufferNode* nd = get_completed_buffer(stop_at);
- if (nd == NULL) {
- return false;
- } else {
- if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
- assert_fully_consumed(nd, buffer_size());
- // Done with fully processed buffer.
- deallocate_buffer(nd);
- Atomic::inc(&_processed_buffers_rs_thread);
- } else {
- // Return partially processed buffer to the queue.
- guarantee(!during_pause, "Should never stop early");
- enqueue_completed_buffer(nd);
- }
- return true;
- }
-}
-
-void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
- BufferNode* nd = _cur_par_buffer_node;
- while (nd != NULL) {
- BufferNode* next = nd->next();
- BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
- if (actual == nd) {
- bool b = apply_closure_to_buffer(cl, nd, false);
- guarantee(b, "Should not stop early.");
- nd = next;
- } else {
- nd = actual;
- }
- }
-}
-
-void DirtyCardQueueSet::abandon_logs() {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- abandon_completed_buffers();
- // Since abandon is done only at safepoints, we can safely manipulate
- // these queues.
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- G1ThreadLocalData::dirty_card_queue(t).reset();
- }
- shared_dirty_card_queue()->reset();
-}
-
-void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
- if (!dcq.is_empty()) {
- dcq.flush();
- }
-}
-
-void DirtyCardQueueSet::concatenate_logs() {
- // Iterate over all the threads, if we find a partial log add it to
- // the global list of logs. Temporarily turn off the limit on the number
- // of outstanding buffers.
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- size_t old_limit = max_completed_buffers();
- set_max_completed_buffers(MaxCompletedBuffersUnlimited);
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
- }
- concatenate_log(_shared_dirty_card_queue);
- set_max_completed_buffers(old_limit);
-}
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_DIRTYCARDQUEUE_HPP
-#define SHARE_GC_G1_DIRTYCARDQUEUE_HPP
-
-#include "gc/shared/ptrQueue.hpp"
-#include "memory/allocation.hpp"
-
-class DirtyCardQueueSet;
-class G1FreeIdSet;
-class JavaThread;
-class Monitor;
-
-// A closure class for processing card table entries. Note that we don't
-// require these closure objects to be stack-allocated.
-class CardTableEntryClosure: public CHeapObj<mtGC> {
-public:
- // Process the card whose card table entry is "card_ptr". If returns
- // "false", terminate the iteration early.
- virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
-};
-
-// A ptrQueue whose elements are "oops", pointers to object heads.
-class DirtyCardQueue: public PtrQueue {
-public:
- DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);
-
- // Flush before destroying; queue may be used to capture pending work while
- // doing something else, with auto-flush on completion.
- ~DirtyCardQueue();
-
- // Process queue entries and release resources.
- void flush() { flush_impl(); }
-
- // Compiler support.
- static ByteSize byte_offset_of_index() {
- return PtrQueue::byte_offset_of_index<DirtyCardQueue>();
- }
- using PtrQueue::byte_width_of_index;
-
- static ByteSize byte_offset_of_buf() {
- return PtrQueue::byte_offset_of_buf<DirtyCardQueue>();
- }
- using PtrQueue::byte_width_of_buf;
-
-};
-
-
-
-class DirtyCardQueueSet: public PtrQueueSet {
- DirtyCardQueue _shared_dirty_card_queue;
-
- // Apply the closure to the elements of "node" from it's index to
- // buffer_size. If all closure applications return true, then
- // returns true. Stops processing after the first closure
- // application that returns false, and returns false from this
- // function. If "consume" is true, the node's index is updated to
- // exclude the processed elements, e.g. up to the element for which
- // the closure returned false.
- bool apply_closure_to_buffer(CardTableEntryClosure* cl,
- BufferNode* node,
- bool consume,
- uint worker_i = 0);
-
- // If there are more than stop_at completed buffers, pop one, apply
- // the specified closure to its active elements, and return true.
- // Otherwise return false.
- //
- // A completely processed buffer is freed. However, if a closure
- // invocation returns false, processing is stopped and the partially
- // processed buffer (with its index updated to exclude the processed
- // elements, e.g. up to the element for which the closure returned
- // false) is returned to the completed buffer set.
- //
- // If during_pause is true, stop_at must be zero, and the closure
- // must never return false.
- bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
- uint worker_i,
- size_t stop_at,
- bool during_pause);
-
- bool mut_process_buffer(BufferNode* node);
-
- G1FreeIdSet* _free_ids;
-
- // The number of completed buffers processed by mutator and rs thread,
- // respectively.
- jint _processed_buffers_mut;
- jint _processed_buffers_rs_thread;
-
- // Current buffer node used for parallel iteration.
- BufferNode* volatile _cur_par_buffer_node;
-
- void concatenate_log(DirtyCardQueue& dcq);
-
-public:
- DirtyCardQueueSet(bool notify_when_complete = true);
- ~DirtyCardQueueSet();
-
- void initialize(Monitor* cbl_mon,
- BufferNode::Allocator* allocator,
- Mutex* lock,
- bool init_free_ids = false);
-
- // The number of parallel ids that can be claimed to allow collector or
- // mutator threads to do card-processing work.
- static uint num_par_ids();
-
- static void handle_zero_index_for_thread(JavaThread* t);
-
- // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
- // completed buffers remaining.
- bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
-
- // Apply the given closure to all completed buffers. The given closure's do_card_ptr
- // must never return false. Must only be called during GC.
- bool apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i);
-
- void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
- // Applies the current closure to all completed buffers, non-consumptively.
- // Can be used in parallel, all callers using the iteration state initialized
- // by reset_for_par_iteration.
- void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
-
- DirtyCardQueue* shared_dirty_card_queue() {
- return &_shared_dirty_card_queue;
- }
-
- // If a full collection is happening, reset partial logs, and ignore
- // completed ones: the full collection will make them all irrelevant.
- void abandon_logs();
-
- // If any threads have partial logs, add them to the global list of logs.
- void concatenate_logs();
-
- jint processed_buffers_mut() {
- return _processed_buffers_mut;
- }
- jint processed_buffers_rs_thread() {
- return _processed_buffers_rs_thread;
- }
-
-};
-
-#endif // SHARE_GC_G1_DIRTYCARDQUEUE_HPP
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -25,11 +25,10 @@
#ifndef SHARE_GC_G1_G1BARRIERSET_HPP
#define SHARE_GC_G1_G1BARRIERSET_HPP
-#include "gc/g1/dirtyCardQueue.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
-class DirtyCardQueueSet;
class CardTable;
class G1CardTable;
@@ -42,7 +41,7 @@
BufferNode::Allocator _satb_mark_queue_buffer_allocator;
BufferNode::Allocator _dirty_card_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
- DirtyCardQueueSet _dirty_card_queue_set;
+ G1DirtyCardQueueSet _dirty_card_queue_set;
static G1BarrierSet* g1_barrier_set() {
return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
@@ -88,7 +87,7 @@
return g1_barrier_set()->_satb_mark_queue_set;
}
- static DirtyCardQueueSet& dirty_card_queue_set() {
+ static G1DirtyCardQueueSet& dirty_card_queue_set() {
return g1_barrier_set()->_dirty_card_queue_set;
}
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -37,6 +37,7 @@
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@@ -107,7 +108,7 @@
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
-class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
+class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
private:
size_t _num_dirtied;
G1CollectedHeap* _g1h;
@@ -124,7 +125,7 @@
}
public:
- RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
+ RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
_num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
@@ -1811,7 +1812,7 @@
}
{
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
}
@@ -1954,12 +1955,12 @@
return _hrm->total_free_bytes();
}
-void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
+void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
_hot_card_cache->drain(cl, worker_i);
}
-void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
size_t n_completed_buffers = 0;
while (dcqs.apply_closure_during_gc(cl, worker_i)) {
n_completed_buffers++;
@@ -2605,10 +2606,10 @@
size_t G1CollectedHeap::pending_card_num() {
size_t extra_cards = 0;
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
- DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
+ G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
extra_cards += dcq.size();
}
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
size_t buffer_size = dcqs.buffer_size();
size_t buffer_num = dcqs.completed_buffers_num();
@@ -2630,7 +2631,7 @@
size_t _total_humongous;
size_t _candidate_humongous;
- DirtyCardQueue _dcq;
+ G1DirtyCardQueue _dcq;
bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
assert(region->is_starts_humongous(), "Must start a humongous object");
@@ -3410,10 +3411,10 @@
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
private:
- DirtyCardQueueSet* _queue;
+ G1DirtyCardQueueSet* _queue;
G1CollectedHeap* _g1h;
public:
- G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
+ G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
_queue(queue), _g1h(g1h) { }
virtual void work(uint worker_id) {
@@ -3434,7 +3435,7 @@
dirty_card_queue_set().reset_for_par_iteration();
workers()->run_task(&redirty_task);
- DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
dcq.merge_bufferlists(&dirty_card_queue_set());
assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -31,6 +31,7 @@
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacStats.hpp"
@@ -758,7 +759,7 @@
// A set of cards that cover the objects for which the Rsets should be updated
// concurrently after the collection.
- DirtyCardQueueSet _dirty_card_queue_set;
+ G1DirtyCardQueueSet _dirty_card_queue_set;
// After a collection pause, convert the regions in the collection set into free
// regions.
@@ -918,7 +919,7 @@
uint num_task_queues() const;
// A set of cards where updates happened during the GC
- DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
+ G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
// Create a G1CollectedHeap with the specified policy.
// Must call the initialize method afterwards.
@@ -983,10 +984,10 @@
void scrub_rem_set();
// Apply the given closure on all cards in the Hot Card Cache, emptying it.
- void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
+ void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i);
// Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
- void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
+ void iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i);
// The shared block offset table array.
G1BlockOffsetTable* bot() const { return _bot; }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -30,6 +30,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -372,7 +373,7 @@
// _finger set in set_non_marking_state
- _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
+ _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
_max_num_tasks(ParallelGCThreads),
// _num_active_tasks set in set_non_marking_state()
// _tasks set inside the constructor
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/java.hpp"
@@ -378,7 +379,7 @@
void G1ConcurrentRefine::adjust(double update_rs_time,
size_t update_rs_processed_buffers,
double goal_ms) {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
if (G1UseAdaptiveConcRefinement) {
update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
@@ -386,7 +387,7 @@
// Change the barrier params
if (max_num_threads() == 0) {
// Disable dcqs notification when there are no threads to notify.
- dcqs.set_process_completed_buffers_threshold(DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
+ dcqs.set_process_completed_buffers_threshold(G1DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
} else {
// Worker 0 is the primary; wakeup is via dcqs notification.
STATIC_ASSERT(max_yellow_zone <= INT_MAX);
@@ -417,7 +418,7 @@
}
uint G1ConcurrentRefine::worker_id_offset() {
- return DirtyCardQueueSet::num_par_ids();
+ return G1DirtyCardQueueSet::num_par_ids();
}
void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
@@ -427,7 +428,7 @@
}
bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of the buffers falls down into the yellow zone,
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -29,7 +29,6 @@
#include "utilities/globalDefinitions.hpp"
// Forward decl
-class CardTableEntryClosure;
class G1ConcurrentRefine;
class G1ConcurrentRefineThread;
class outputStream;
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -65,7 +66,7 @@
}
bool G1ConcurrentRefineThread::is_active() {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
return is_primary() ? dcqs.process_completed_buffers() : _active;
}
@@ -74,7 +75,7 @@
if (!is_primary()) {
set_active(true);
} else {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
dcqs.set_process_completed_buffers(true);
}
_monitor->notify();
@@ -85,7 +86,7 @@
if (!is_primary()) {
set_active(false);
} else {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
dcqs.set_process_completed_buffers(false);
}
}
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -25,11 +25,9 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/shared/concurrentGCThread.hpp"
// Forward Decl.
-class CardTableEntryClosure;
class G1ConcurrentRefine;
// One or more G1 Concurrent Refinement Threads may be active if concurrent
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
+#include "gc/g1/g1FreeIdSet.hpp"
+#include "gc/g1/g1RemSet.hpp"
+#include "gc/g1/g1ThreadLocalData.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
+
+// Closure used for updating remembered sets and recording references that
+// point into the collection set while the mutator is running.
+// Assumed to be only executed concurrently with the mutator. Yields via
+// SuspendibleThreadSet after every card.
+class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
+public:
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+ G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
+
+ if (SuspendibleThreadSet::should_yield()) {
+ // Caller will actually yield.
+ return false;
+ }
+ // Otherwise, we finished successfully; return true.
+ return true;
+ }
+};
+
+G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent) :
+ // Dirty card queues are always active, so we create them with their
+ // active field set to true.
+ PtrQueue(qset, permanent, true /* active */)
+{ }
+
+G1DirtyCardQueue::~G1DirtyCardQueue() {
+ if (!is_permanent()) {
+ flush();
+ }
+}
+
+G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
+ PtrQueueSet(notify_when_complete),
+ _shared_dirty_card_queue(this, true /* permanent */),
+ _free_ids(NULL),
+ _processed_buffers_mut(0),
+ _processed_buffers_rs_thread(0),
+ _cur_par_buffer_node(NULL)
+{
+ _all_active = true;
+}
+
+G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
+ delete _free_ids;
+}
+
+// Determines how many mutator threads can process the buffers in parallel.
+uint G1DirtyCardQueueSet::num_par_ids() {
+ return (uint)os::initial_active_processor_count();
+}
+
+void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
+ BufferNode::Allocator* allocator,
+ Mutex* lock,
+ bool init_free_ids) {
+ PtrQueueSet::initialize(cbl_mon, allocator);
+ _shared_dirty_card_queue.set_lock(lock);
+ if (init_free_ids) {
+ _free_ids = new G1FreeIdSet(0, num_par_ids());
+ }
+}
+
+void G1DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
+ G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
+}
+
+bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
+ BufferNode* node,
+ bool consume,
+ uint worker_i) {
+ if (cl == NULL) return true;
+ bool result = true;
+ void** buf = BufferNode::make_buffer_from_node(node);
+ size_t i = node->index();
+ size_t limit = buffer_size();
+ for ( ; i < limit; ++i) {
+ jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
+ assert(card_ptr != NULL, "invariant");
+ if (!cl->do_card_ptr(card_ptr, worker_i)) {
+ result = false; // Incomplete processing.
+ break;
+ }
+ }
+ if (consume) {
+ assert(i <= buffer_size(), "invariant");
+ node->set_index(i);
+ }
+ return result;
+}
+
+#ifndef ASSERT
+#define assert_fully_consumed(node, buffer_size)
+#else
+#define assert_fully_consumed(node, buffer_size) \
+ do { \
+ size_t _afc_index = (node)->index(); \
+ size_t _afc_size = (buffer_size); \
+ assert(_afc_index == _afc_size, \
+ "Buffer was not fully consumed as claimed: index: " \
+ SIZE_FORMAT ", size: " SIZE_FORMAT, \
+ _afc_index, _afc_size); \
+ } while (0)
+#endif // ASSERT
+
+bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
+ guarantee(_free_ids != NULL, "must be");
+
+ uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
+ G1RefineCardConcurrentlyClosure cl;
+ bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
+ _free_ids->release_par_id(worker_i); // release the id
+
+ if (result) {
+ assert_fully_consumed(node, buffer_size());
+ Atomic::inc(&_processed_buffers_mut);
+ }
+ return result;
+}
+
+bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
+ G1RefineCardConcurrentlyClosure cl;
+ return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
+}
+
+bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
+ assert_at_safepoint();
+ return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
+}
+
+bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
+ uint worker_i,
+ size_t stop_at,
+ bool during_pause) {
+ assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
+ BufferNode* nd = get_completed_buffer(stop_at);
+ if (nd == NULL) {
+ return false;
+ } else {
+ if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
+ assert_fully_consumed(nd, buffer_size());
+ // Done with fully processed buffer.
+ deallocate_buffer(nd);
+ Atomic::inc(&_processed_buffers_rs_thread);
+ } else {
+ // Return partially processed buffer to the queue.
+ guarantee(!during_pause, "Should never stop early");
+ enqueue_completed_buffer(nd);
+ }
+ return true;
+ }
+}
+
+void G1DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl) {
+ BufferNode* nd = _cur_par_buffer_node;
+ while (nd != NULL) {
+ BufferNode* next = nd->next();
+ BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
+ if (actual == nd) {
+ bool b = apply_closure_to_buffer(cl, nd, false);
+ guarantee(b, "Should not stop early.");
+ nd = next;
+ } else {
+ nd = actual;
+ }
+ }
+}
+
+void G1DirtyCardQueueSet::abandon_logs() {
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+ abandon_completed_buffers();
+ // Since abandon is done only at safepoints, we can safely manipulate
+ // these queues.
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ G1ThreadLocalData::dirty_card_queue(t).reset();
+ }
+ shared_dirty_card_queue()->reset();
+}
+
+void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
+ if (!dcq.is_empty()) {
+ dcq.flush();
+ }
+}
+
+void G1DirtyCardQueueSet::concatenate_logs() {
+ // Iterate over all the threads; if we find a partial log, add it to
+ // the global list of logs. Temporarily turn off the limit on the
+ // number of outstanding buffers.
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+ size_t old_limit = max_completed_buffers();
+ set_max_completed_buffers(MaxCompletedBuffersUnlimited);
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
+ }
+ concatenate_log(_shared_dirty_card_queue);
+ set_max_completed_buffers(old_limit);
+}
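
The do_card_ptr contract used throughout this new file (return false to stop processing of the current buffer early, as G1RefineCardConcurrentlyClosure does when yielding) can be seen in isolation in the sketch below. The closure class is hypothetical and not part of this changeset; only G1CardTableEntryClosure and the queue-set entry points come from the patch.

    #include "gc/g1/g1DirtyCardQueue.hpp"

    // Illustrative only: count dirty cards without refining them.
    class CountDirtyCardsClosure : public G1CardTableEntryClosure {
      size_t _count;
    public:
      CountDirtyCardsClosure() : _count(0) { }
      virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
        _count++;        // card_ptr is the address of one dirty card table entry
        return true;     // returning false would stop processing of this buffer
      }
      size_t count() const { return _count; }
    };

Such a closure would be handed to apply_closure_during_gc() or par_apply_closure_to_all_completed_buffers() in the same way as G1RefineCardConcurrentlyClosure above.
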
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
+#define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
+
+#include "gc/shared/ptrQueue.hpp"
+#include "memory/allocation.hpp"
+
+class G1DirtyCardQueueSet;
+class G1FreeIdSet;
+class JavaThread;
+class Monitor;
+
+// A closure class for processing card table entries. Note that we don't
+// require these closure objects to be stack-allocated.
+class G1CardTableEntryClosure: public CHeapObj<mtGC> {
+public:
+ // Process the card whose card table entry is "card_ptr". Returning
+ // "false" terminates the iteration early.
+ virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
+};
+
+// A PtrQueue whose elements are addresses of card table entries ("dirty cards").
+class G1DirtyCardQueue: public PtrQueue {
+public:
+ G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);
+
+ // Flush before destroying; queue may be used to capture pending work while
+ // doing something else, with auto-flush on completion.
+ ~G1DirtyCardQueue();
+
+ // Process queue entries and release resources.
+ void flush() { flush_impl(); }
+
+ // Compiler support.
+ static ByteSize byte_offset_of_index() {
+ return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
+ }
+ using PtrQueue::byte_width_of_index;
+
+ static ByteSize byte_offset_of_buf() {
+ return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
+ }
+ using PtrQueue::byte_width_of_buf;
+
+};
+
+
+
+class G1DirtyCardQueueSet: public PtrQueueSet {
+ G1DirtyCardQueue _shared_dirty_card_queue;
+
+ // Apply the closure to the elements of "node" from its index to
+ // buffer_size. If all closure applications return true, then
+ // returns true. Stops processing after the first closure
+ // application that returns false, and returns false from this
+ // function. If "consume" is true, the node's index is updated to
+ // exclude the processed elements, e.g. up to the element for which
+ // the closure returned false.
+ bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
+ BufferNode* node,
+ bool consume,
+ uint worker_i = 0);
+
+ // If there are more than stop_at completed buffers, pop one, apply
+ // the specified closure to its active elements, and return true.
+ // Otherwise return false.
+ //
+ // A completely processed buffer is freed. However, if a closure
+ // invocation returns false, processing is stopped and the partially
+ // processed buffer (with its index updated to exclude the processed
+ // elements, e.g. up to the element for which the closure returned
+ // false) is returned to the completed buffer set.
+ //
+ // If during_pause is true, stop_at must be zero, and the closure
+ // must never return false.
+ bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
+ uint worker_i,
+ size_t stop_at,
+ bool during_pause);
+
+ bool mut_process_buffer(BufferNode* node);
+
+ G1FreeIdSet* _free_ids;
+
+ // The number of completed buffers processed by mutator and rs thread,
+ // respectively.
+ jint _processed_buffers_mut;
+ jint _processed_buffers_rs_thread;
+
+ // Current buffer node used for parallel iteration.
+ BufferNode* volatile _cur_par_buffer_node;
+
+ void concatenate_log(G1DirtyCardQueue& dcq);
+
+public:
+ G1DirtyCardQueueSet(bool notify_when_complete = true);
+ ~G1DirtyCardQueueSet();
+
+ void initialize(Monitor* cbl_mon,
+ BufferNode::Allocator* allocator,
+ Mutex* lock,
+ bool init_free_ids = false);
+
+ // The number of parallel ids that can be claimed to allow collector or
+ // mutator threads to do card-processing work.
+ static uint num_par_ids();
+
+ static void handle_zero_index_for_thread(JavaThread* t);
+
+ // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
+ // completed buffers remaining.
+ bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
+
+ // Apply the given closure to all completed buffers. The given closure's do_card_ptr
+ // must never return false. Must only be called during GC.
+ bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
+
+ void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
+ // Applies the current closure to all completed buffers, non-consumptively.
+ // Can be used in parallel, all callers using the iteration state initialized
+ // by reset_for_par_iteration.
+ void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
+
+ G1DirtyCardQueue* shared_dirty_card_queue() {
+ return &_shared_dirty_card_queue;
+ }
+
+ // If a full collection is happening, reset partial logs, and ignore
+ // completed ones: the full collection will make them all irrelevant.
+ void abandon_logs();
+
+ // If any threads have partial logs, add them to the global list of logs.
+ void concatenate_logs();
+
+ jint processed_buffers_mut() {
+ return _processed_buffers_mut;
+ }
+ jint processed_buffers_rs_thread() {
+ return _processed_buffers_rs_thread;
+ }
+
+};
+
+#endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
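
A brief usage sketch of the non-consumptive parallel iteration declared above (reset_for_par_iteration / par_apply_closure_to_all_completed_buffers). The two free functions are illustrative only and not part of the patch; they assume just the names declared in this header.

    #include "gc/g1/g1DirtyCardQueue.hpp"

    // Illustrative only.
    void prepare_par_card_scan(G1DirtyCardQueueSet& dcqs) {
      // Single-threaded setup: point the shared cursor at the head of the
      // completed-buffer list.
      dcqs.reset_for_par_iteration();
    }

    void worker_scan_cards(G1DirtyCardQueueSet& dcqs, G1CardTableEntryClosure* cl) {
      // Called by each worker; buffers are claimed with a CAS on the shared
      // cursor, so every completed buffer is visited exactly once and is not
      // consumed. The closure must never return false here.
      dcqs.par_apply_closure_to_all_completed_buffers(cl);
    }
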
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@@ -41,11 +41,11 @@
class UpdateRSetDeferred : public BasicOopIterateClosure {
private:
G1CollectedHeap* _g1h;
- DirtyCardQueue* _dcq;
+ G1DirtyCardQueue* _dcq;
G1CardTable* _ct;
public:
- UpdateRSetDeferred(DirtyCardQueue* dcq) :
+ UpdateRSetDeferred(G1DirtyCardQueue* dcq) :
_g1h(G1CollectedHeap::heap()), _dcq(dcq), _ct(_g1h->card_table()) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -196,7 +196,7 @@
uint _worker_id;
HeapRegionClaimer* _hrclaimer;
- DirtyCardQueue _dcq;
+ G1DirtyCardQueue _dcq;
UpdateRSetDeferred _update_rset_cl;
public:
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "runtime/atomic.hpp"
@@ -83,7 +83,7 @@
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
-void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
+void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_i) {
assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
assert(_hot_cache != NULL, "Logic");
--- a/src/hotspot/share/gc/g1/g1HotCardCache.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -32,8 +32,7 @@
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
-class CardTableEntryClosure;
-class DirtyCardQueue;
+class G1CardTableEntryClosure;
class G1CollectedHeap;
class HeapRegion;
@@ -112,7 +111,7 @@
// Refine the cards that have delayed as a result of
// being in the cache.
- void drain(CardTableEntryClosure* cl, uint worker_i);
+ void drain(G1CardTableEntryClosure* cl, uint worker_i);
// Set up for parallel processing of the cards in the hot cache
void reset_hot_cache_claimed_index() {
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -25,9 +25,9 @@
#ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
@@ -46,7 +46,7 @@
class G1ParScanThreadState : public CHeapObj<mtGC> {
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
- DirtyCardQueue _dcq;
+ G1DirtyCardQueue _dcq;
G1CardTable* _ct;
G1EvacuationRootClosures* _closures;
@@ -77,7 +77,7 @@
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
- DirtyCardQueue& dirty_card_queue() { return _dcq; }
+ G1DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1CardTable* ct() { return _ct; }
InCSetState dest(InCSetState original) const {
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,12 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
@@ -300,7 +300,7 @@
}
uint G1RemSet::num_par_rem_sets() {
- return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
+ return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}
void G1RemSet::initialize(size_t capacity, uint max_regions) {
@@ -456,7 +456,7 @@
}
// Closure used for updating rem sets. Only called during an evacuation pause.
-class G1RefineCardClosure: public CardTableEntryClosure {
+class G1RefineCardClosure: public G1CardTableEntryClosure {
G1RemSet* _g1rs;
G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
@@ -520,7 +520,7 @@
}
void G1RemSet::prepare_for_oops_into_collection_set_do() {
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
dcqs.concatenate_logs();
_scan_state->reset();
@@ -677,7 +677,7 @@
*card_ptr = G1CardTable::dirty_card_val();
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
- DirtyCardQueue* sdcq =
+ G1DirtyCardQueue* sdcq =
G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
sdcq->enqueue(card_ptr);
}
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1REMSET_HPP
#define SHARE_GC_G1_G1REMSET_HPP
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -53,7 +54,7 @@
void G1RemSetSummary::update() {
_num_conc_refined_cards = _rem_set->num_conc_refined_cards();
- DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
+ G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
_num_processed_buf_mutator = dcqs.processed_buffers_mut();
_num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
--- a/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
#ifndef SHARE_GC_G1_G1THREADLOCALDATA_HPP
#define SHARE_GC_G1_G1THREADLOCALDATA_HPP
-#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@@ -33,8 +33,8 @@
class G1ThreadLocalData {
private:
- SATBMarkQueue _satb_mark_queue;
- DirtyCardQueue _dirty_card_queue;
+ SATBMarkQueue _satb_mark_queue;
+ G1DirtyCardQueue _dirty_card_queue;
G1ThreadLocalData() :
_satb_mark_queue(&G1BarrierSet::satb_mark_queue_set()),
@@ -66,7 +66,7 @@
return data(thread)->_satb_mark_queue;
}
- static DirtyCardQueue& dirty_card_queue(Thread* thread) {
+ static G1DirtyCardQueue& dirty_card_queue(Thread* thread) {
return data(thread)->_dirty_card_queue;
}
@@ -83,11 +83,11 @@
}
static ByteSize dirty_card_queue_index_offset() {
- return dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_index();
+ return dirty_card_queue_offset() + G1DirtyCardQueue::byte_offset_of_index();
}
static ByteSize dirty_card_queue_buffer_offset() {
- return dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf();
+ return dirty_card_queue_offset() + G1DirtyCardQueue::byte_offset_of_buf();
}
};
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -102,7 +102,7 @@
declare_toplevel_type(PtrQueue) \
declare_toplevel_type(HeapRegionType) \
declare_toplevel_type(SATBMarkQueue) \
- declare_toplevel_type(DirtyCardQueue) \
+ declare_toplevel_type(G1DirtyCardQueue) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -75,6 +75,7 @@
bool mismatched = (decorators & C2_MISMATCHED) != 0;
bool unaligned = (decorators & C2_UNALIGNED) != 0;
+ bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
bool in_native = (decorators & IN_NATIVE) != 0;
@@ -93,7 +94,7 @@
}
store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
- access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
+ access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
access.set_raw_access(store);
} else {
assert(!requires_atomic_access, "not yet supported");
@@ -132,6 +133,7 @@
bool unaligned = (decorators & C2_UNALIGNED) != 0;
bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
bool pinned = (decorators & C2_PINNED_LOAD) != 0;
+ bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
@@ -148,7 +150,7 @@
load = kit->make_load(control, adr, val_type, access.type(), mo);
} else {
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
- dep, requires_atomic_access, unaligned, mismatched);
+ dep, requires_atomic_access, unaligned, mismatched, unsafe);
}
access.set_raw_access(load);
} else {
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -130,7 +130,7 @@
ZBarrierSetC2State* s = bs->state();
if (s->load_barrier_count() >= 2) {
Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsLastRound);
+ PhaseIdealLoop::optimize(igvn, LoopOptsLastRound);
if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
}
}
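
The hunk above replaces construction of a PhaseIdealLoop local, whose only purpose was the loop optimization performed as a constructor side effect, with a call to the static PhaseIdealLoop::optimize() entry point. Reduced to a self-contained sketch with a hypothetical class (not HotSpot code), the pattern looks like this:

    #include <vector>

    class LoopOptimizer {
    public:
      // Static entry point: call sites read as an action rather than as the
      // construction of an otherwise unused local object.
      static void optimize(std::vector<int>& graph, int mode) {
        LoopOptimizer lo(graph, mode);
        lo.run();
      }
    private:
      LoopOptimizer(std::vector<int>& graph, int mode) : _graph(graph), _mode(mode) { }
      void run() { /* ... perform the optimization passes ... */ }
      std::vector<int>& _graph;
      int _mode;
    };

A call site then reads LoopOptimizer::optimize(graph, mode); which mirrors the new form in the hunk.
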
--- a/src/hotspot/share/interpreter/bytecodeTracer.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -382,7 +382,7 @@
st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
} else {
if (tag.is_dynamic_constant() || tag.is_invoke_dynamic()) {
- int bsm = constants->invoke_dynamic_bootstrap_method_ref_index_at(i);
+ int bsm = constants->bootstrap_method_ref_index_at(i);
st->print(" bsm=%d", bsm);
}
st->print_cr(" %d <%s%s%s>", i, name->as_C_string(), sep, signature->as_C_string());
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -977,9 +977,6 @@
LastFrameAccessor last_frame(thread);
const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
- //TO DO: consider passing BCI to Java.
- // int caller_bci = last_frame.method()->bci_from(last_frame.bcp());
-
// resolve method
CallInfo info;
constantPoolHandle pool(thread, last_frame.method()->constants());
--- a/src/hotspot/share/interpreter/linkResolver.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/linkResolver.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -94,26 +94,21 @@
}
void CallInfo::set_handle(const methodHandle& resolved_method,
- Handle resolved_appendix,
- Handle resolved_method_type, TRAPS) {
- set_handle(SystemDictionary::MethodHandle_klass(), resolved_method, resolved_appendix, resolved_method_type, CHECK);
+ Handle resolved_appendix, TRAPS) {
+ set_handle(SystemDictionary::MethodHandle_klass(), resolved_method, resolved_appendix, CHECK);
}
void CallInfo::set_handle(Klass* resolved_klass,
const methodHandle& resolved_method,
- Handle resolved_appendix,
- Handle resolved_method_type, TRAPS) {
- if (resolved_method.is_null()) {
- THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null");
- }
+ Handle resolved_appendix, TRAPS) {
+ guarantee(resolved_method.not_null(), "resolved method is null");
assert(resolved_method->intrinsic_id() == vmIntrinsics::_invokeBasic ||
resolved_method->is_compiled_lambda_form(),
"linkMethod must return one of these");
int vtable_index = Method::nonvirtual_vtable_index;
assert(!resolved_method->has_vtable_index(), "");
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
- _resolved_appendix = resolved_appendix;
- _resolved_method_type = resolved_method_type;
+ _resolved_appendix = resolved_appendix;
}
void CallInfo::set_common(Klass* resolved_klass,
@@ -452,7 +447,6 @@
methodHandle LinkResolver::lookup_polymorphic_method(
const LinkInfo& link_info,
Handle *appendix_result_or_null,
- Handle *method_type_result,
TRAPS) {
Klass* klass = link_info.resolved_klass();
Symbol* name = link_info.name();
@@ -520,7 +514,6 @@
full_signature,
link_info.current_klass(),
&appendix,
- &method_type,
CHECK_NULL);
if (TraceMethodHandles) {
ttyLocker ttyl;
@@ -552,7 +545,6 @@
assert(appendix_result_or_null != NULL, "");
(*appendix_result_or_null) = appendix;
- (*method_type_result) = method_type;
}
return result;
}
@@ -760,7 +752,7 @@
if (resolved_method.is_null()) {
// JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
- resolved_method = lookup_polymorphic_method(link_info, (Handle*)NULL, (Handle*)NULL, THREAD);
+ resolved_method = lookup_polymorphic_method(link_info, (Handle*)NULL, THREAD);
if (HAS_PENDING_EXCEPTION) {
nested_exception = Handle(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
@@ -1697,10 +1689,8 @@
resolved_klass == SystemDictionary::VarHandle_klass(), "");
assert(MethodHandles::is_signature_polymorphic_name(link_info.name()), "");
Handle resolved_appendix;
- Handle resolved_method_type;
- methodHandle resolved_method = lookup_polymorphic_method(link_info,
- &resolved_appendix, &resolved_method_type, CHECK);
- result.set_handle(resolved_klass, resolved_method, resolved_appendix, resolved_method_type, CHECK);
+ methodHandle resolved_method = lookup_polymorphic_method(link_info, &resolved_appendix, CHECK);
+ result.set_handle(resolved_klass, resolved_method, resolved_appendix, CHECK);
}
void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
@@ -1737,8 +1727,7 @@
if (!cpce->is_f1_null()) {
methodHandle method( THREAD, cpce->f1_as_method());
Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
- Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
- result.set_handle(method, appendix, method_type, THREAD);
+ result.set_handle(method, appendix, THREAD);
Exceptions::wrap_dynamic_exception(CHECK);
return;
}
@@ -1766,8 +1755,7 @@
if (!cpce->is_f1_null()) {
methodHandle method( THREAD, cpce->f1_as_method());
Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
- Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
- result.set_handle(method, appendix, method_type, THREAD);
+ result.set_handle(method, appendix, THREAD);
Exceptions::wrap_dynamic_exception(CHECK);
} else {
assert(cpce->indy_resolution_failed(), "Resolution failure flag not set");
@@ -1788,17 +1776,15 @@
// JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...)
// The appendix argument is likely to be a freshly-created CallSite.
Handle resolved_appendix;
- Handle resolved_method_type;
methodHandle resolved_method =
SystemDictionary::find_dynamic_call_site_invoker(current_klass,
pool_index,
bootstrap_specifier,
method_name, method_signature,
&resolved_appendix,
- &resolved_method_type,
THREAD);
Exceptions::wrap_dynamic_exception(CHECK);
- result.set_handle(resolved_method, resolved_appendix, resolved_method_type, THREAD);
+ result.set_handle(resolved_method, resolved_appendix, THREAD);
Exceptions::wrap_dynamic_exception(CHECK);
}
--- a/src/hotspot/share/interpreter/linkResolver.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/linkResolver.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -55,7 +55,6 @@
// others inferred), vtable, itable)
int _call_index; // vtable or itable index of selected class method (if any)
Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix)
- Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites)
Handle _resolved_method_name; // Object holding the ResolvedMethodName
void set_static(Klass* resolved_klass, const methodHandle& resolved_method, TRAPS);
@@ -68,10 +67,10 @@
const methodHandle& selected_method,
int vtable_index, TRAPS);
void set_handle(const methodHandle& resolved_method,
- Handle resolved_appendix, Handle resolved_method_type, TRAPS);
+ Handle resolved_appendix, TRAPS);
void set_handle(Klass* resolved_klass,
const methodHandle& resolved_method,
- Handle resolved_appendix, Handle resolved_method_type, TRAPS);
+ Handle resolved_appendix, TRAPS);
void set_common(Klass* resolved_klass, Klass* selected_klass,
const methodHandle& resolved_method,
const methodHandle& selected_method,
@@ -98,7 +97,6 @@
methodHandle resolved_method() const { return _resolved_method; }
methodHandle selected_method() const { return _selected_method; }
Handle resolved_appendix() const { return _resolved_appendix; }
- Handle resolved_method_type() const { return _resolved_method_type; }
Handle resolved_method_name() const { return _resolved_method_name; }
// Materialize a java.lang.invoke.ResolvedMethodName for this resolved_method
void set_resolved_method_name(TRAPS);
@@ -207,8 +205,7 @@
static Method* lookup_method_in_interfaces(const LinkInfo& link_info);
static methodHandle lookup_polymorphic_method(const LinkInfo& link_info,
- Handle *appendix_result_or_null,
- Handle *method_type_result, TRAPS);
+ Handle *appendix_result_or_null, TRAPS);
JVMCI_ONLY(public:) // Needed for CompilerToVM.resolveMethod()
// Not Linktime so doesn't take LinkInfo
static methodHandle lookup_instance_method_in_klasses (Klass* klass, Symbol* name, Symbol* signature,
--- a/src/hotspot/share/interpreter/rewriter.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/rewriter.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -221,13 +221,13 @@
MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(),
_pool->name_ref_at(cp_index))) {
// we may need a resolved_refs entry for the appendix
- add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+ add_invokedynamic_resolved_references_entry(cp_index, cache_index);
status = +1;
} else if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_VarHandle() &&
MethodHandles::is_signature_polymorphic_name(SystemDictionary::VarHandle_klass(),
_pool->name_ref_at(cp_index))) {
// we may need a resolved_refs entry for the appendix
- add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+ add_invokedynamic_resolved_references_entry(cp_index, cache_index);
status = +1;
} else {
status = -1;
@@ -259,7 +259,7 @@
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
- int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+ int resolved_index = add_invokedynamic_resolved_references_entry(cp_index, cache_index);
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
// not just one per distinct CP entry. In other words, the
@@ -307,12 +307,9 @@
// invokedynamic resolved references map also points to cp cache and must
// add delta to each.
int resolved_index = _patch_invokedynamic_refs->at(i);
- for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
- assert(_invokedynamic_references_map.at(resolved_index + entry) == cache_index,
+ assert(_invokedynamic_references_map.at(resolved_index) == cache_index,
"should be the same index");
- _invokedynamic_references_map.at_put(resolved_index+entry,
- cache_index + delta);
- }
+ _invokedynamic_references_map.at_put(resolved_index, cache_index + delta);
}
}
}
--- a/src/hotspot/share/interpreter/rewriter.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/interpreter/rewriter.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -159,19 +159,12 @@
return ref_index;
}
- // add a new entries to the resolved_references map (for invokedynamic and invokehandle only)
- int add_invokedynamic_resolved_references_entries(int cp_index, int cache_index) {
+ // add a new entry to the resolved_references map (for invokedynamic and invokehandle only)
+ int add_invokedynamic_resolved_references_entry(int cp_index, int cache_index) {
assert(_resolved_reference_limit >= 0, "must add indy refs after first iteration");
- int ref_index = -1;
- for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
- const int index = _resolved_references_map.append(cp_index); // many-to-one
- assert(index >= _resolved_reference_limit, "");
- if (entry == 0) {
- ref_index = index;
- }
- assert((index - entry) == ref_index, "entries must be consecutive");
- _invokedynamic_references_map.at_put_grow(index, cache_index, -1);
- }
+ int ref_index = _resolved_references_map.append(cp_index); // many-to-one
+ assert(ref_index >= _resolved_reference_limit, "");
+ _invokedynamic_references_map.at_put_grow(ref_index, cache_index, -1);
return ref_index;
}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -238,9 +238,8 @@
assert(_thread_group_name == NULL, "invariant");
if (tgname != NULL) {
size_t len = strlen(tgname);
- _thread_group_name = JfrCHeapObj::new_array<char>(len+1);
- strncpy(_thread_group_name, tgname, len);
- _thread_group_name[len] = '\0';
+ _thread_group_name = JfrCHeapObj::new_array<char>(len + 1);
+ strncpy(_thread_group_name, tgname, len + 1);
}
}
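
The same copy idiom is tightened here and in the JFR chunk-state and repository files below: the destination buffer is allocated with len + 1 elements, so a single strncpy of len + 1 bytes also copies the terminating NUL from the source, which is why the separate '\0' assignment is dropped. A standalone illustration (plain C++ with a hypothetical helper, not JFR code):

    #include <cstddef>
    #include <cstring>

    // Illustrative only: same shape as the JfrCHeapObj::new_array<char> call sites.
    static char* copy_c_string(const char* src) {
      const size_t len = strlen(src);
      char* dst = new char[len + 1];   // room for the terminating '\0'
      strncpy(dst, src, len + 1);      // copies len characters plus the '\0'
      return dst;
    }
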
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,8 +100,7 @@
assert(path != NULL, "invariant");
const size_t path_len = strlen(path);
char* new_path = JfrCHeapObj::new_array<char>(path_len + 1);
- strncpy(new_path, path, path_len);
- new_path[path_len] = '\0';
+ strncpy(new_path, path, path_len + 1);
return new_path;
}
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -82,10 +82,6 @@
Heap_lock->unlock();
}
- if (Safepoint_lock->owned_by_self()) {
- Safepoint_lock->unlock();
- }
-
if (VMOperationQueue_lock->owned_by_self()) {
VMOperationQueue_lock->unlock();
}
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -209,8 +209,7 @@
if (entry_name == NULL) {
return NULL;
}
- strncpy(entry_name, entry, entry_len);
- entry_name[entry_len] = '\0';
+ strncpy(entry_name, entry, entry_len + 1);
const char* const fully_qualified_path_entry = fully_qualified(entry_name);
if (NULL == fully_qualified_path_entry) {
return NULL;
@@ -332,8 +331,7 @@
if (NULL == emergency_dump_path) {
return NULL;
}
- strncpy(emergency_dump_path, buffer, emergency_filename_length);
- emergency_dump_path[emergency_filename_length] = '\0';
+ strncpy(emergency_dump_path, buffer, emergency_filename_length + 1);
}
return emergency_dump_path;
}
@@ -407,8 +405,7 @@
if (_path == NULL) {
return false;
}
- strncpy(_path, path, path_len);
- _path[path_len] = '\0';
+ strncpy(_path, path, path_len + 1);
return true;
}
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -383,8 +383,7 @@
}
bool JfrStackTrace::record_safe(JavaThread* thread, int skip, bool leakp /* false */) {
- assert(SafepointSynchronize::safepoint_safe(thread, thread->thread_state())
- || thread == Thread::current(), "Thread stack needs to be walkable");
+ assert(thread == Thread::current(), "Thread stack needs to be walkable");
vframeStream vfs(thread);
u4 count = 0;
_reached_root = true;
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1241,7 +1241,6 @@
vmassert(MethodHandles::is_signature_polymorphic_method(resolved_method()),"!");
vmassert(!MethodHandles::is_signature_polymorphic_static(resolved_method->intrinsic_id()), "!");
vmassert(cp_cache_entry->appendix_if_resolved(cp) == NULL, "!");
- vmassert(cp_cache_entry->method_type_if_resolved(cp) == NULL, "!");
methodHandle m(LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
vmassert(m == resolved_method, "!!");
--- a/src/hotspot/share/memory/memRegion.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/memory/memRegion.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -34,7 +34,9 @@
// Note that MemRegions are passed by value, not by reference.
// The intent is that they remain very small and contain no
-// objects. These should never be allocated in heap but we do
+// objects. The copy constructor and destructor must be trivial,
+// to support optimization for pass-by-value.
+// These should never be allocated in heap but we do
// create MemRegions (in CardTableBarrierSet) in heap so operator
// new and operator new [] added for this special case.
@@ -59,8 +61,6 @@
assert(end >= start, "incorrect constructor arguments");
}
- MemRegion(const MemRegion& mr): _start(mr._start), _word_size(mr._word_size) {}
-
MemRegion intersection(const MemRegion mr2) const;
// regions must overlap or be adjacent
MemRegion _union(const MemRegion mr2) const;
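
The new comment above asks that the copy constructor and destructor stay trivial so MemRegion remains cheap to pass by value; removing the hand-written copy constructor lets the compiler generate a trivial one for the two plain members. Where such a constraint matters, it can be pinned down at compile time; the struct below is a hypothetical stand-in, not MemRegion itself:

    #include <cstddef>
    #include <type_traits>

    // Hypothetical value type with the same shape: two plain members and no
    // user-provided copy constructor or destructor.
    struct Region {
      void*  _start;
      size_t _word_size;
    };

    static_assert(std::is_trivially_copyable<Region>::value,
                  "pass-by-value types should be trivially copyable");
    static_assert(std::is_trivially_destructible<Region>::value,
                  "pass-by-value types should be trivially destructible");
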
--- a/src/hotspot/share/oops/constantPool.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/oops/constantPool.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -594,21 +594,13 @@
}
-bool ConstantPool::has_method_type_at_if_loaded(const constantPoolHandle& cpool, int which) {
+bool ConstantPool::has_local_signature_at_if_loaded(const constantPoolHandle& cpool, int which) {
if (cpool->cache() == NULL) return false; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
- return e->has_method_type();
+ return e->has_local_signature();
}
-oop ConstantPool::method_type_at_if_loaded(const constantPoolHandle& cpool, int which) {
- if (cpool->cache() == NULL) return NULL; // nothing to load yet
- int cache_index = decode_cpcache_index(which, true);
- ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
- return e->method_type_if_resolved(cpool);
-}
-
-
Symbol* ConstantPool::impl_name_ref_at(int which, bool uncached) {
int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
return symbol_at(name_index);
@@ -625,26 +617,22 @@
if (!uncached && cache() != NULL) {
if (ConstantPool::is_invokedynamic_index(which)) {
// Invokedynamic index is index into the constant pool cache
- int pool_index = invokedynamic_cp_cache_entry_at(which)->constant_pool_index();
- pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index);
+ int pool_index = invokedynamic_bootstrap_ref_index_at(which);
+ pool_index = bootstrap_name_and_type_ref_index_at(pool_index);
assert(tag_at(pool_index).is_name_and_type(), "");
return pool_index;
}
// change byte-ordering and go via cache
i = remap_instruction_operand_from_cache(which);
} else {
- if (tag_at(which).is_invoke_dynamic() ||
- tag_at(which).is_dynamic_constant() ||
- tag_at(which).is_dynamic_constant_in_error()) {
- int pool_index = invoke_dynamic_name_and_type_ref_index_at(which);
+ if (tag_at(which).has_bootstrap()) {
+ int pool_index = bootstrap_name_and_type_ref_index_at(which);
assert(tag_at(pool_index).is_name_and_type(), "");
return pool_index;
}
}
assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
- assert(!tag_at(i).is_invoke_dynamic() &&
- !tag_at(i).is_dynamic_constant() &&
- !tag_at(i).is_dynamic_constant_in_error(), "Must be handled above");
+ assert(!tag_at(i).has_bootstrap(), "Must be handled above");
jint ref_index = *int_at_addr(i);
return extract_high_short_from_int(ref_index);
}
@@ -654,7 +642,7 @@
if (!uncached && cache() != NULL) {
if (ConstantPool::is_invokedynamic_index(which)) {
// Invokedynamic index is index into resolved_references
- pool_index = invokedynamic_cp_cache_entry_at(which)->constant_pool_index();
+ pool_index = invokedynamic_bootstrap_ref_index_at(which);
} else {
// change byte-ordering and go via cache
pool_index = remap_instruction_operand_from_cache(which);
@@ -1128,14 +1116,14 @@
// JVM_CONSTANT_Dynamic is an ordered pair of [bootm, name&ftype], plus optional arguments
// In both cases, the bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
// It is accompanied by the optional arguments.
- int bsm_index = this_cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
+ int bsm_index = this_cp->bootstrap_method_ref_index_at(index);
oop bsm_oop = this_cp->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL);
if (!java_lang_invoke_MethodHandle::is_instance(bsm_oop)) {
THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "BSM not an MethodHandle");
}
// Extract the optional static arguments.
- argc = this_cp->invoke_dynamic_argument_count_at(index);
+ argc = this_cp->bootstrap_argument_count_at(index);
// if there are no static arguments, return the bsm by itself:
if (argc == 0 && UseBootstrapCallInfo < 2) return bsm_oop;
@@ -1177,7 +1165,7 @@
if (!use_BSCI && this_cp->tag_at(index).is_dynamic_constant()) {
bool found_unresolved_condy = false;
for (int i = 0; i < argc; i++) {
- int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+ int arg_index = this_cp->bootstrap_argument_index_at(index, i);
if (this_cp->tag_at(arg_index).is_dynamic_constant()) {
// potential recursion point condy -> condy
bool found_it = false;
@@ -1197,7 +1185,7 @@
bool all_resolved = true;
for (int i = 0; i < argc; i++) {
bool found_it = false;
- int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+ int arg_index = this_cp->bootstrap_argument_index_at(index, i);
this_cp->find_cached_constant_at(arg_index, found_it, CHECK_NULL);
if (!found_it) { all_resolved = false; break; }
}
@@ -1244,7 +1232,7 @@
!(this_cp->tag_at(index).is_invoke_dynamic() ||
this_cp->tag_at(index).is_dynamic_constant()) ||
(0 > start_arg || start_arg > end_arg) ||
- (end_arg > (argc = this_cp->invoke_dynamic_argument_count_at(index))) ||
+ (end_arg > (argc = this_cp->bootstrap_argument_count_at(index))) ||
(0 > pos || pos > limit) ||
(info.is_null() || limit > info->length())) {
// An index or something else went wrong; throw an error.
@@ -1255,7 +1243,7 @@
// now we can loop safely
int info_i = pos;
for (int i = start_arg; i < end_arg; i++) {
- int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
+ int arg_index = this_cp->bootstrap_argument_index_at(index, i);
oop arg_oop;
if (must_resolve) {
arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK);
@@ -1454,10 +1442,10 @@
case JVM_CONSTANT_Dynamic:
{
- int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
- int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
- int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
- int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
+ int k1 = bootstrap_name_and_type_ref_index_at(index1);
+ int k2 = cp2->bootstrap_name_and_type_ref_index_at(index2);
+ int i1 = bootstrap_methods_attribute_index(index1);
+ int i2 = cp2->bootstrap_methods_attribute_index(index2);
// separate statements and variables because CHECK_false is used
bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
@@ -1466,10 +1454,10 @@
case JVM_CONSTANT_InvokeDynamic:
{
- int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
- int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
- int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
- int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
+ int k1 = bootstrap_name_and_type_ref_index_at(index1);
+ int k2 = cp2->bootstrap_name_and_type_ref_index_at(index2);
+ int i1 = bootstrap_methods_attribute_index(index1);
+ int i2 = cp2->bootstrap_methods_attribute_index(index2);
// separate statements and variables because CHECK_false is used
bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
@@ -1793,16 +1781,16 @@
case JVM_CONSTANT_Dynamic:
case JVM_CONSTANT_DynamicInError:
{
- int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
- int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
+ int k1 = from_cp->bootstrap_methods_attribute_index(from_i);
+ int k2 = from_cp->bootstrap_name_and_type_ref_index_at(from_i);
k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
to_cp->dynamic_constant_at_put(to_i, k1, k2);
} break;
case JVM_CONSTANT_InvokeDynamic:
{
- int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
- int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
+ int k1 = from_cp->bootstrap_methods_attribute_index(from_i);
+ int k2 = from_cp->bootstrap_name_and_type_ref_index_at(from_i);
k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
to_cp->invoke_dynamic_at_put(to_i, k1, k2);
} break;
@@ -2252,7 +2240,7 @@
*bytes = tag;
idx1 = extract_low_short_from_int(*int_at_addr(idx));
idx2 = extract_high_short_from_int(*int_at_addr(idx));
- assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4");
+ assert(idx2 == bootstrap_name_and_type_ref_index_at(idx), "correct half of u4");
Bytes::put_Java_u2((address) (bytes+1), idx1);
Bytes::put_Java_u2((address) (bytes+3), idx2);
DBG(printf("JVM_CONSTANT_Dynamic: %hd %hd", idx1, idx2));
@@ -2262,7 +2250,7 @@
*bytes = tag;
idx1 = extract_low_short_from_int(*int_at_addr(idx));
idx2 = extract_high_short_from_int(*int_at_addr(idx));
- assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4");
+ assert(idx2 == bootstrap_name_and_type_ref_index_at(idx), "correct half of u4");
Bytes::put_Java_u2((address) (bytes+1), idx1);
Bytes::put_Java_u2((address) (bytes+3), idx2);
DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd", idx1, idx2));
@@ -2443,12 +2431,12 @@
case JVM_CONSTANT_Dynamic :
case JVM_CONSTANT_DynamicInError :
{
- st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
- st->print(" type_index=%d", invoke_dynamic_name_and_type_ref_index_at(index));
- int argc = invoke_dynamic_argument_count_at(index);
+ st->print("bootstrap_method_index=%d", bootstrap_method_ref_index_at(index));
+ st->print(" type_index=%d", bootstrap_name_and_type_ref_index_at(index));
+ int argc = bootstrap_argument_count_at(index);
if (argc > 0) {
for (int arg_i = 0; arg_i < argc; arg_i++) {
- int arg = invoke_dynamic_argument_index_at(index, arg_i);
+ int arg = bootstrap_argument_index_at(index, arg_i);
st->print((arg_i == 0 ? " arguments={%d" : ", %d"), arg);
}
st->print("}");
@@ -2457,12 +2445,12 @@
break;
case JVM_CONSTANT_InvokeDynamic :
{
- st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
- st->print(" name_and_type_index=%d", invoke_dynamic_name_and_type_ref_index_at(index));
- int argc = invoke_dynamic_argument_count_at(index);
+ st->print("bootstrap_method_index=%d", bootstrap_method_ref_index_at(index));
+ st->print(" name_and_type_index=%d", bootstrap_name_and_type_ref_index_at(index));
+ int argc = bootstrap_argument_count_at(index);
if (argc > 0) {
for (int arg_i = 0; arg_i < argc; arg_i++) {
- int arg = invoke_dynamic_argument_index_at(index, arg_i);
+ int arg = bootstrap_argument_index_at(index, arg_i);
st->print((arg_i == 0 ? " arguments={%d" : ", %d"), arg);
}
st->print("}");
--- a/src/hotspot/share/oops/constantPool.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/oops/constantPool.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -246,16 +246,22 @@
// The invokedynamic points at a CP cache entry. This entry points back
// at the original CP entry (CONSTANT_InvokeDynamic) and also (via f2) at an entry
// in the resolved_references array (which provides the appendix argument).
- int invokedynamic_cp_cache_index(int index) const {
- assert (is_invokedynamic_index(index), "should be a invokedynamic index");
- int cache_index = decode_invokedynamic_index(index);
+ int invokedynamic_cp_cache_index(int indy_index) const {
+ assert(is_invokedynamic_index(indy_index), "should be a invokedynamic index");
+ int cache_index = decode_invokedynamic_index(indy_index);
return cache_index;
}
- ConstantPoolCacheEntry* invokedynamic_cp_cache_entry_at(int index) const {
+ ConstantPoolCacheEntry* invokedynamic_cp_cache_entry_at(int indy_index) const {
// decode index that invokedynamic points to.
- int cp_cache_index = invokedynamic_cp_cache_index(index);
+ int cp_cache_index = invokedynamic_cp_cache_index(indy_index);
return cache()->entry_at(cp_cache_index);
}
+ // Given the per-instruction index of an indy instruction, report the
+ // main constant pool entry for its bootstrap specifier.
+ // From there, uncached_name/signature_ref_at will get the name/type.
+ int invokedynamic_bootstrap_ref_index_at(int indy_index) const {
+ return invokedynamic_cp_cache_entry_at(indy_index)->constant_pool_index();
+ }
// Assembly code support
static int tags_offset_in_bytes() { return offset_of(ConstantPool, _tags); }
@@ -294,14 +300,14 @@
*int_at_addr(which) = ref_index;
}
- void dynamic_constant_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
+ void dynamic_constant_at_put(int which, int bsms_attribute_index, int name_and_type_index) {
tag_at_put(which, JVM_CONSTANT_Dynamic);
- *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
+ *int_at_addr(which) = ((jint) name_and_type_index<<16) | bsms_attribute_index;
}
- void invoke_dynamic_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
+ void invoke_dynamic_at_put(int which, int bsms_attribute_index, int name_and_type_index) {
tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
- *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
+ *int_at_addr(which) = ((jint) name_and_type_index<<16) | bsms_attribute_index;
}
void unresolved_string_at_put(int which, Symbol* s) {
@@ -534,26 +540,22 @@
return symbol_at(sym);
}
- int invoke_dynamic_name_and_type_ref_index_at(int which) {
- assert(tag_at(which).is_invoke_dynamic() ||
- tag_at(which).is_dynamic_constant() ||
- tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
+ int bootstrap_name_and_type_ref_index_at(int which) {
+ assert(tag_at(which).has_bootstrap(), "Corrupted constant pool");
return extract_high_short_from_int(*int_at_addr(which));
}
- int invoke_dynamic_bootstrap_specifier_index(int which) {
- assert(tag_at(which).is_invoke_dynamic() ||
- tag_at(which).is_dynamic_constant() ||
- tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
+ int bootstrap_methods_attribute_index(int which) {
+ assert(tag_at(which).has_bootstrap(), "Corrupted constant pool");
return extract_low_short_from_int(*int_at_addr(which));
}
- int invoke_dynamic_operand_base(int which) {
- int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which);
- return operand_offset_at(operands(), bootstrap_specifier_index);
+ int bootstrap_operand_base(int which) {
+ int bsms_attribute_index = bootstrap_methods_attribute_index(which);
+ return operand_offset_at(operands(), bsms_attribute_index);
}
// The first part of the operands array consists of an index into the second part.
// Extract a 32-bit index value from the first part.
- static int operand_offset_at(Array<u2>* operands, int bootstrap_specifier_index) {
- int n = (bootstrap_specifier_index * 2);
+ static int operand_offset_at(Array<u2>* operands, int bsms_attribute_index) {
+ int n = (bsms_attribute_index * 2);
assert(n >= 0 && n+2 <= operands->length(), "oob");
// The first 32-bit index points to the beginning of the second part
// of the operands array. Make sure this index is in the first part.
@@ -566,8 +568,8 @@
assert(offset == 0 || offset >= second_part && offset <= operands->length(), "oob (3)");
return offset;
}
- static void operand_offset_at_put(Array<u2>* operands, int bootstrap_specifier_index, int offset) {
- int n = bootstrap_specifier_index * 2;
+ static void operand_offset_at_put(Array<u2>* operands, int bsms_attribute_index, int offset) {
+ int n = bsms_attribute_index * 2;
assert(n >= 0 && n+2 <= operands->length(), "oob");
operands->at_put(n+0, extract_low_short_from_int(offset));
operands->at_put(n+1, extract_high_short_from_int(offset));
@@ -580,20 +582,23 @@
#ifdef ASSERT
// operand tuples fit together exactly, end to end
- static int operand_limit_at(Array<u2>* operands, int bootstrap_specifier_index) {
- int nextidx = bootstrap_specifier_index + 1;
+ static int operand_limit_at(Array<u2>* operands, int bsms_attribute_index) {
+ int nextidx = bsms_attribute_index + 1;
if (nextidx == operand_array_length(operands))
return operands->length();
else
return operand_offset_at(operands, nextidx);
}
- int invoke_dynamic_operand_limit(int which) {
- int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which);
- return operand_limit_at(operands(), bootstrap_specifier_index);
+ int bootstrap_operand_limit(int which) {
+ int bsms_attribute_index = bootstrap_methods_attribute_index(which);
+ return operand_limit_at(operands(), bsms_attribute_index);
}
#endif //ASSERT
- // layout of InvokeDynamic and Dynamic bootstrap method specifier (in second part of operands array):
+ // Layout of InvokeDynamic and Dynamic bootstrap method specifier
+ // data in the second part of the operands array. This encodes one record in
+ // the BootstrapMethods attribute. The whole specifier also includes
+ // the name and type information from the main constant pool entry.
enum {
_indy_bsm_offset = 0, // CONSTANT_MethodHandle bsm
_indy_argc_offset = 1, // u2 argc
@@ -602,35 +607,35 @@
// These functions are used in RedefineClasses for CP merge
- int operand_offset_at(int bootstrap_specifier_index) {
- assert(0 <= bootstrap_specifier_index &&
- bootstrap_specifier_index < operand_array_length(operands()),
+ int operand_offset_at(int bsms_attribute_index) {
+ assert(0 <= bsms_attribute_index &&
+ bsms_attribute_index < operand_array_length(operands()),
"Corrupted CP operands");
- return operand_offset_at(operands(), bootstrap_specifier_index);
+ return operand_offset_at(operands(), bsms_attribute_index);
}
- int operand_bootstrap_method_ref_index_at(int bootstrap_specifier_index) {
- int offset = operand_offset_at(bootstrap_specifier_index);
+ int operand_bootstrap_method_ref_index_at(int bsms_attribute_index) {
+ int offset = operand_offset_at(bsms_attribute_index);
return operands()->at(offset + _indy_bsm_offset);
}
- int operand_argument_count_at(int bootstrap_specifier_index) {
- int offset = operand_offset_at(bootstrap_specifier_index);
+ int operand_argument_count_at(int bsms_attribute_index) {
+ int offset = operand_offset_at(bsms_attribute_index);
int argc = operands()->at(offset + _indy_argc_offset);
return argc;
}
- int operand_argument_index_at(int bootstrap_specifier_index, int j) {
- int offset = operand_offset_at(bootstrap_specifier_index);
+ int operand_argument_index_at(int bsms_attribute_index, int j) {
+ int offset = operand_offset_at(bsms_attribute_index);
return operands()->at(offset + _indy_argv_offset + j);
}
- int operand_next_offset_at(int bootstrap_specifier_index) {
- int offset = operand_offset_at(bootstrap_specifier_index) + _indy_argv_offset
- + operand_argument_count_at(bootstrap_specifier_index);
+ int operand_next_offset_at(int bsms_attribute_index) {
+ int offset = operand_offset_at(bsms_attribute_index) + _indy_argv_offset
+ + operand_argument_count_at(bsms_attribute_index);
return offset;
}
- // Compare a bootsrap specifier in the operands arrays
- bool compare_operand_to(int bootstrap_specifier_index1, const constantPoolHandle& cp2,
- int bootstrap_specifier_index2, TRAPS);
- // Find a bootsrap specifier in the operands array
- int find_matching_operand(int bootstrap_specifier_index, const constantPoolHandle& search_cp,
+ // Compare bootstrap specifier data in the operands arrays
+ bool compare_operand_to(int bsms_attribute_index1, const constantPoolHandle& cp2,
+ int bsms_attribute_index2, TRAPS);
+ // Find bootstrap specifier data in the operands array
+ int find_matching_operand(int bsms_attribute_index, const constantPoolHandle& search_cp,
int operands_cur_len, TRAPS);
// Resize the operands array with delta_len and delta_size
void resize_operands(int delta_len, int delta_size, TRAPS);
@@ -639,26 +644,22 @@
// Shrink the operands array to a smaller array with new_len length
void shrink_operands(int new_len, TRAPS);
- int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
- assert(tag_at(which).is_invoke_dynamic() ||
- tag_at(which).is_dynamic_constant() ||
- tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
- int op_base = invoke_dynamic_operand_base(which);
+ int bootstrap_method_ref_index_at(int which) {
+ assert(tag_at(which).has_bootstrap(), "Corrupted constant pool");
+ int op_base = bootstrap_operand_base(which);
return operands()->at(op_base + _indy_bsm_offset);
}
- int invoke_dynamic_argument_count_at(int which) {
- assert(tag_at(which).is_invoke_dynamic() ||
- tag_at(which).is_dynamic_constant() ||
- tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
- int op_base = invoke_dynamic_operand_base(which);
+ int bootstrap_argument_count_at(int which) {
+ assert(tag_at(which).has_bootstrap(), "Corrupted constant pool");
+ int op_base = bootstrap_operand_base(which);
int argc = operands()->at(op_base + _indy_argc_offset);
DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
- int next_offset = invoke_dynamic_operand_limit(which));
+ int next_offset = bootstrap_operand_limit(which));
assert(end_offset == next_offset, "matched ending");
return argc;
}
- int invoke_dynamic_argument_index_at(int which, int j) {
- int op_base = invoke_dynamic_operand_base(which);
+ int bootstrap_argument_index_at(int which, int j) {
+ int op_base = bootstrap_operand_base(which);
DEBUG_ONLY(int argc = operands()->at(op_base + _indy_argc_offset));
assert((uint)j < (uint)argc, "oob");
return operands()->at(op_base + _indy_argv_offset + j);
@@ -796,8 +797,7 @@
static Method* method_at_if_loaded (const constantPoolHandle& this_cp, int which);
static bool has_appendix_at_if_loaded (const constantPoolHandle& this_cp, int which);
static oop appendix_at_if_loaded (const constantPoolHandle& this_cp, int which);
- static bool has_method_type_at_if_loaded (const constantPoolHandle& this_cp, int which);
- static oop method_type_at_if_loaded (const constantPoolHandle& this_cp, int which);
+ static bool has_local_signature_at_if_loaded (const constantPoolHandle& this_cp, int which);
static Klass* klass_at_if_loaded (const constantPoolHandle& this_cp, int which);
// Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
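For orientation, a minimal sketch of how the renamed bootstrap accessors are meant to be used together. This is illustrative code, not part of the patch: the dump_bootstrap_specifier helper and the exact includes are assumptions, while the accessor names are the ones introduced above.

    #include "oops/constantPool.hpp"
    #include "utilities/ostream.hpp"

    // Illustrative helper: walk one bootstrap specifier through the renamed accessors.
    static void dump_bootstrap_specifier(ConstantPool* cp, int cp_index, outputStream* st) {
      assert(cp->tag_at(cp_index).has_bootstrap(), "must be Dynamic or InvokeDynamic");
      int bsms_index = cp->bootstrap_methods_attribute_index(cp_index); // record in BootstrapMethods
      int bsm_ref    = cp->bootstrap_method_ref_index_at(cp_index);     // CONSTANT_MethodHandle
      int argc       = cp->bootstrap_argument_count_at(cp_index);
      st->print_cr("bsms_attribute_index=%d bsm_ref=%d argc=%d", bsms_index, bsm_ref, argc);
      for (int j = 0; j < argc; j++) {
        st->print_cr("  static arg[%d] -> cp index %d", j, cp->bootstrap_argument_index_at(cp_index, j));
      }
      // The name-and-type half of the specifier lives in the main constant pool entry.
      st->print_cr("  name_and_type -> cp index %d", cp->bootstrap_name_and_type_ref_index_at(cp_index));
    }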
--- a/src/hotspot/share/oops/cpCache.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/oops/cpCache.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -392,23 +392,22 @@
const methodHandle adapter = call_info.resolved_method();
const Handle appendix = call_info.resolved_appendix();
- const Handle method_type = call_info.resolved_method_type();
const bool has_appendix = appendix.not_null();
- const bool has_method_type = method_type.not_null();
// Write the flags.
+ // MHs and indy are always sig-poly and have a local signature.
set_method_flags(as_TosState(adapter->result_type()),
- ((has_appendix ? 1 : 0) << has_appendix_shift ) |
- ((has_method_type ? 1 : 0) << has_method_type_shift) |
- ( 1 << is_final_shift ),
+ ((has_appendix ? 1 : 0) << has_appendix_shift ) |
+ ( 1 << has_local_signature_shift ) |
+ ( 1 << is_final_shift ),
adapter->size_of_parameters());
if (TraceInvokeDynamic) {
ttyLocker ttyl;
- tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method_type=" PTR_FORMAT "%s method=" PTR_FORMAT " ",
+ tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method=" PTR_FORMAT " (local signature) ",
invoke_code,
- p2i(appendix()), (has_appendix ? "" : " (unused)"),
- p2i(method_type()), (has_method_type ? "" : " (unused)"),
+ p2i(appendix()),
+ (has_appendix ? "" : " (unused)"),
p2i(adapter()));
adapter->print();
if (has_appendix) appendix()->print();
@@ -435,20 +434,12 @@
// Store appendix, if any.
if (has_appendix) {
- const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
+ const int appendix_index = f2_as_index();
assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
resolved_references->obj_at_put(appendix_index, appendix());
}
- // Store MethodType, if any.
- if (has_method_type) {
- const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
- assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
- assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
- resolved_references->obj_at_put(method_type_index, method_type());
- }
-
release_set_f1(adapter()); // This must be the last one to set (see NOTE above)!
// The interpreter assembly code does not check byte_2,
@@ -459,6 +450,9 @@
ttyLocker ttyl;
this->print(tty, 0);
}
+
+ assert(has_appendix == this->has_appendix(), "proper storage of appendix flag");
+ assert(this->has_local_signature(), "proper storage of signature flag");
}
bool ConstantPoolCacheEntry::save_and_throw_indy_exc(
@@ -544,16 +538,7 @@
oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) {
if (!has_appendix())
return NULL;
- const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
- objArrayOop resolved_references = cpool->resolved_references();
- return resolved_references->obj_at(ref_index);
-}
-
-
-oop ConstantPoolCacheEntry::method_type_if_resolved(const constantPoolHandle& cpool) {
- if (!has_method_type())
- return NULL;
- const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
+ const int ref_index = f2_as_index();
objArrayOop resolved_references = cpool->resolved_references();
return resolved_references->obj_at(ref_index);
}
@@ -701,16 +686,7 @@
for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
const int cpci = invokedynamic_references_map.at(ref);
if (cpci >= 0) {
-#ifdef ASSERT
- // invokedynamic and invokehandle have more entries; check if they
- // all point to the same constant pool cache entry.
- for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
- const int cpci_next = invokedynamic_references_map.at(ref + entry);
- assert(cpci == cpci_next, "%d == %d", cpci, cpci_next);
- }
-#endif
entry_at(cpci)->initialize_resolved_reference_index(ref);
- ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1; // skip extra entries
}
}
}
--- a/src/hotspot/share/oops/cpCache.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/oops/cpCache.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -51,7 +51,7 @@
// _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr
// _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |1 |-3-|----16-----]
-// _flags [tos|0|F=0|M|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
+// _flags [tos|0|F=0|S|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
// --------------------------------
@@ -114,7 +114,7 @@
// _f2 = vtable/itable index (or final Method*) for virtual calls only,
// unused by non-virtual. The is_vfinal flag indicates this is a
// method pointer for a final method, not an index.
-// _flags = method type info (t section),
+// _flags = has local signature (MHs and indy),
// virtual final bit (vfinal),
// parameter size (psize section)
//
@@ -180,7 +180,7 @@
tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below
// misc. option bits; can be any bit position in [16..27]
is_field_entry_shift = 26, // (F) is it a field or a method?
- has_method_type_shift = 25, // (M) does the call site have a MethodType?
+ has_local_signature_shift = 25, // (S) does the call site have a per-site signature (sig-poly methods)?
has_appendix_shift = 24, // (A) does the call site have an appendix argument?
is_forced_virtual_shift = 23, // (I) is the interface reference forced to virtual mode?
is_final_shift = 22, // (f) is the field or method final?
@@ -291,19 +291,10 @@
bool save_and_throw_indy_exc(const constantPoolHandle& cpool, int cpool_index,
int index, constantTag tag, TRAPS);
- // invokedynamic and invokehandle call sites have two entries in the
- // resolved references array:
- // appendix (at index+0)
- // MethodType (at index+1)
- enum {
- _indy_resolved_references_appendix_offset = 0,
- _indy_resolved_references_method_type_offset = 1,
- _indy_resolved_references_entries
- };
-
+ // invokedynamic and invokehandle call sites have an "appendix" item in the
+ // resolved references array.
Method* method_if_resolved(const constantPoolHandle& cpool);
oop appendix_if_resolved(const constantPoolHandle& cpool);
- oop method_type_if_resolved(const constantPoolHandle& cpool);
void set_parameter_size(int value);
@@ -356,7 +347,7 @@
bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
bool indy_resolution_failed() const;
bool has_appendix() const;
- bool has_method_type() const;
+ bool has_local_signature() const;
bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; }
bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; }
bool is_long() const { return flag_state() == ltos; }
--- a/src/hotspot/share/oops/cpCache.inline.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/oops/cpCache.inline.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -71,8 +71,8 @@
return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0;
}
-inline bool ConstantPoolCacheEntry::has_method_type() const {
- return (!is_f1_null()) && (_flags & (1 << has_method_type_shift)) != 0;
+inline bool ConstantPoolCacheEntry::has_local_signature() const {
+ return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0;
}
inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); }
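A self-contained illustration of the flag packing after this change, using the shift values from cpCache.hpp (S = 25, A = 24, f = 22). It also reflects why f2 can now index the appendix directly: there is only one resolved-references slot per indy call site. The class and helper names are stand-ins, not HotSpot code.

    #include <cassert>
    #include <cstdint>

    // Stand-in for the method-entry flag word: S (has_local_signature) replaces
    // the old M (has_method_type) bit; the low 8 bits carry the parameter size.
    enum {
      has_local_signature_shift = 25,
      has_appendix_shift        = 24,
      is_final_shift            = 22
    };

    static uint32_t make_indy_flags(bool has_appendix, int param_size) {
      // MHs and indy are always sig-poly, so the local-signature bit is always set.
      return ((has_appendix ? 1u : 0u) << has_appendix_shift) |
             (1u << has_local_signature_shift) |
             (1u << is_final_shift) |
             (uint32_t)(param_size & 0xff);
    }

    int main() {
      uint32_t flags = make_indy_flags(/*has_appendix=*/true, /*param_size=*/3);
      assert((flags >> has_local_signature_shift) & 1);
      assert((flags >> has_appendix_shift) & 1);
      assert((flags & 0xff) == 3);
      return 0;
    }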
--- a/src/hotspot/share/opto/compile.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/compile.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -2112,7 +2112,7 @@
// PhaseIdealLoop is expensive so we only try it once we are
// out of live nodes and we only try it again if the previous
// helped got the number of nodes down significantly
- PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
+ PhaseIdealLoop::optimize(igvn, LoopOptsNone);
if (failing()) return;
low_live_nodes = live_nodes();
_major_progress = true;
@@ -2160,7 +2160,7 @@
while(major_progress() && (_loop_opts_cnt > 0)) {
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
assert( cnt++ < 40, "infinite cycle in loop optimization" );
- PhaseIdealLoop ideal_loop(igvn, mode);
+ PhaseIdealLoop::optimize(igvn, mode);
_loop_opts_cnt--;
if (failing()) return false;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
@@ -2282,7 +2282,7 @@
if (has_loops()) {
// Cleanup graph (remove dead nodes).
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsNone);
+ PhaseIdealLoop::optimize(igvn, LoopOptsNone);
if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
if (failing()) return;
}
@@ -2316,7 +2316,7 @@
if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
{
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsDefault);
+ PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
_loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
if (failing()) return;
@@ -2324,7 +2324,7 @@
// Loop opts pass if partial peeling occurred in previous pass
if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
+ PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
_loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
if (failing()) return;
@@ -2332,7 +2332,7 @@
// Loop opts pass for loop-unrolling before CCP
if(major_progress() && (_loop_opts_cnt > 0)) {
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsSkipSplitIf);
+ PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
_loop_opts_cnt--;
if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
}
--- a/src/hotspot/share/opto/graphKit.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/graphKit.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1489,18 +1489,19 @@
LoadNode::ControlDependency control_dependency,
bool require_atomic_access,
bool unaligned,
- bool mismatched) {
+ bool mismatched,
+ bool unsafe) {
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld;
if (require_atomic_access && bt == T_LONG) {
- ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
+ ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
} else if (require_atomic_access && bt == T_DOUBLE) {
- ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
+ ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
} else {
- ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
+ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe);
}
ld = _gvn.transform(ld);
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
@@ -1515,7 +1516,8 @@
MemNode::MemOrd mo,
bool require_atomic_access,
bool unaligned,
- bool mismatched) {
+ bool mismatched,
+ bool unsafe) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
const TypePtr* adr_type = NULL;
debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1534,6 +1536,9 @@
if (mismatched) {
st->as_Store()->set_mismatched_access();
}
+ if (unsafe) {
+ st->as_Store()->set_unsafe_access();
+ }
st = _gvn.transform(st);
set_memory(st, adr_idx);
// Back-to-back stores can only remove intermediate store with DU info
--- a/src/hotspot/share/opto/graphKit.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/graphKit.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -518,27 +518,27 @@
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false) {
+ bool mismatched = false, bool unsafe = false) {
// This version computes alias_index from bottom_type
return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
mo, control_dependency, require_atomic_access,
- unaligned, mismatched);
+ unaligned, mismatched, unsafe);
}
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false) {
+ bool mismatched = false, bool unsafe = false) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other make_load factory");
return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
mo, control_dependency, require_atomic_access,
- unaligned, mismatched);
+ unaligned, mismatched, unsafe);
}
// This is the base version which is given an alias index.
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false);
+ bool mismatched = false, bool unsafe = false);
// Create & transform a StoreNode and store the effect into the
// parser's memory state.
@@ -553,7 +553,8 @@
MemNode::MemOrd mo,
bool require_atomic_access = false,
bool unaligned = false,
- bool mismatched = false) {
+ bool mismatched = false,
+ bool unsafe = false) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other store_to_memory factory");
return store_to_memory(ctl, adr, val, bt,
@@ -568,7 +569,8 @@
MemNode::MemOrd,
bool require_atomic_access = false,
bool unaligned = false,
- bool mismatched = false);
+ bool mismatched = false,
+ bool unsafe = false);
// Perform decorated accesses
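A hypothetical call site for the widened make_load overload, only meant to show where the new trailing flag lands. Here kit (a GraphKit), ctl, adr, value_type, bt and mo are assumed to be in scope; the argument values are illustrative.

    // Hypothetical Unsafe-originated load: mismatched and unsafe, defaults for everything else.
    Node* ld = kit.make_load(ctl, adr, value_type, bt, mo,
                             LoadNode::DependsOnlyOnTest,
                             /*require_atomic_access=*/false,
                             /*unaligned=*/false,
                             /*mismatched=*/true,
                             /*unsafe=*/true);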
--- a/src/hotspot/share/opto/loopnode.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/loopnode.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -2712,8 +2712,6 @@
bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
bool skip_loop_opts = (mode == LoopOptsNone);
- ResourceMark rm;
-
int old_progress = C->major_progress();
uint orig_worklist_size = _igvn._worklist.size();
--- a/src/hotspot/share/opto/loopnode.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/loopnode.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -880,6 +880,42 @@
uint *_dom_depth; // Used for fast LCA test
GrowableArray<uint>* _dom_stk; // For recomputation of dom depth
+ // Perform verification that the graph is valid.
+ PhaseIdealLoop( PhaseIterGVN &igvn) :
+ PhaseTransform(Ideal_Loop),
+ _igvn(igvn),
+ _verify_me(NULL),
+ _verify_only(true),
+ _dom_lca_tags(arena()) { // Thread::resource_area
+ build_and_optimize(LoopOptsVerify);
+ }
+
+ // build the loop tree and perform any requested optimizations
+ void build_and_optimize(LoopOptsMode mode);
+
+ // Dominators for the sea of nodes
+ void Dominators();
+
+ // Compute the Ideal Node to Loop mapping
+ PhaseIdealLoop(PhaseIterGVN &igvn, LoopOptsMode mode) :
+ PhaseTransform(Ideal_Loop),
+ _igvn(igvn),
+ _verify_me(NULL),
+ _verify_only(false),
+ _dom_lca_tags(arena()) { // Thread::resource_area
+ build_and_optimize(mode);
+ }
+
+ // Verify that verify_me made the same decisions as a fresh run.
+ PhaseIdealLoop(PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
+ PhaseTransform(Ideal_Loop),
+ _igvn(igvn),
+ _verify_me(verify_me),
+ _verify_only(false),
+ _dom_lca_tags(arena()) { // Thread::resource_area
+ build_and_optimize(LoopOptsVerify);
+ }
+
public:
Node* idom_no_update(Node* d) const {
return idom_no_update(d->_idx);
@@ -923,54 +959,27 @@
// Replace parallel induction variable (parallel to trip counter)
void replace_parallel_iv(IdealLoopTree *loop);
- // Perform verification that the graph is valid.
- PhaseIdealLoop( PhaseIterGVN &igvn) :
- PhaseTransform(Ideal_Loop),
- _igvn(igvn),
- _verify_me(NULL),
- _verify_only(true),
- _dom_lca_tags(arena()) { // Thread::resource_area
- build_and_optimize(LoopOptsVerify);
- }
-
- // build the loop tree and perform any requested optimizations
- void build_and_optimize(LoopOptsMode mode);
-
- // Dominators for the sea of nodes
- void Dominators();
Node *dom_lca( Node *n1, Node *n2 ) const {
return find_non_split_ctrl(dom_lca_internal(n1, n2));
}
Node *dom_lca_internal( Node *n1, Node *n2 ) const;
- // Compute the Ideal Node to Loop mapping
- PhaseIdealLoop(PhaseIterGVN &igvn, LoopOptsMode mode) :
- PhaseTransform(Ideal_Loop),
- _igvn(igvn),
- _verify_me(NULL),
- _verify_only(false),
- _dom_lca_tags(arena()) { // Thread::resource_area
- build_and_optimize(mode);
- }
-
- // Verify that verify_me made the same decisions as a fresh run.
- PhaseIdealLoop(PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
- PhaseTransform(Ideal_Loop),
- _igvn(igvn),
- _verify_me(verify_me),
- _verify_only(false),
- _dom_lca_tags(arena()) { // Thread::resource_area
- build_and_optimize(LoopOptsVerify);
- }
-
// Build and verify the loop tree without modifying the graph. This
// is useful to verify that all inputs properly dominate their uses.
static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
+ ResourceMark rm;
PhaseIdealLoop v(igvn);
#endif
}
+ // Recommended way to use PhaseIdealLoop.
+ // Run PhaseIdealLoop in the given mode and allocate a local scope for memory allocations.
+ static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) {
+ ResourceMark rm;
+ PhaseIdealLoop v(igvn, mode);
+ }
+
// True if the method has at least 1 irreducible loop
bool _has_irreducible_loops;
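The point of the new static optimize() entry is that the ResourceMark now brackets the whole phase object instead of living inside build_and_optimize(), so everything the phase allocates in the resource area stays valid until the phase is destroyed. A standalone sketch of that scoping pattern, with stand-in names (ScopedArena, LoopPhase) rather than the real classes:

    // Standalone sketch of the wrapper pattern (names are illustrative, not HotSpot's).
    struct ScopedArena {            // stand-in for ResourceMark
      ScopedArena()  { /* open an allocation scope */ }
      ~ScopedArena() { /* release everything allocated in the scope */ }
    };

    class LoopPhase {               // stand-in for PhaseIdealLoop
     public:
      // Recommended entry point: open the scope, then run the phase inside it.
      static void optimize(int mode) {
        ScopedArena rm;             // constructed before the phase object
        LoopPhase phase(mode);      // constructor does the work (build_and_optimize)
      }                             // phase destroyed first, then the arena scope
     private:
      explicit LoopPhase(int mode) { build_and_optimize(mode); }
      void build_and_optimize(int mode) { (void)mode; /* build loop tree, run opts */ }
    };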
--- a/src/hotspot/share/opto/memnode.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/memnode.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -99,6 +99,9 @@
if (_mismatched_access) {
st->print(" mismatched");
}
+ if (_unsafe_access) {
+ st->print(" unsafe");
+ }
}
void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
@@ -789,7 +792,7 @@
//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
Compile* C = gvn.C;
// sanity check the alias category against the created node type
@@ -837,6 +840,9 @@
if (mismatched) {
load->set_mismatched_access();
}
+ if (unsafe) {
+ load->set_unsafe_access();
+ }
if (load->Opcode() == Op_LoadN) {
Node* ld = gvn.transform(load);
return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
@@ -846,7 +852,7 @@
}
LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
bool require_atomic = true;
LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
if (unaligned) {
@@ -855,11 +861,14 @@
if (mismatched) {
load->set_mismatched_access();
}
+ if (unsafe) {
+ load->set_unsafe_access();
+ }
return load;
}
LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
bool require_atomic = true;
LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
if (unaligned) {
@@ -868,6 +877,9 @@
if (mismatched) {
load->set_mismatched_access();
}
+ if (unsafe) {
+ load->set_unsafe_access();
+ }
return load;
}
@@ -978,7 +990,8 @@
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
- AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
+ Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
+ Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base, phase);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
// This is more general than load from boxing objects.
@@ -1031,16 +1044,21 @@
if (st->is_Store()) {
Node* st_adr = st->in(MemNode::Address);
if (!phase->eqv(st_adr, ld_adr)) {
- // Try harder before giving up... Match raw and non-raw pointers.
+ // Try harder before giving up. Unify base pointers with casts (e.g., raw/non-raw pointers).
intptr_t st_off = 0;
- AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
- if (alloc == NULL) return NULL;
- if (alloc != ld_alloc) return NULL;
- if (ld_off != st_off) return NULL;
+ Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_off);
+ if (ld_base == NULL) return NULL;
+ if (st_base == NULL) return NULL;
+ if (ld_base->uncast() != st_base->uncast()) return NULL;
+ if (ld_off != st_off) return NULL;
+ if (ld_off == Type::OffsetBot) return NULL;
+ // Same base, same offset.
+ // Possible improvement for arrays: check index value instead of absolute offset.
+
// At this point we have proven something like this setup:
- // A = Allocate(...)
- // L = LoadQ(, AddP(CastPP(, A.Parm),, #Off))
- // S = StoreQ(, AddP(, A.Parm , #Off), V)
+ // B = << base >>
+ // L = LoadQ(AddP(Check/CastPP(B), #Off))
+ // S = StoreQ(AddP( B , #Off), V)
// (Actually, we haven't yet proven the Q's are the same.)
// In other words, we are loading from a casted version of
// the same pointer-and-offset that we stored to.
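The reworked can_see_stored_value() match can be read as: decompose both addresses into (base, constant offset), strip pointer casts from the bases, and forward the stored value only if the uncast bases are the same node and the offsets are equal and concrete. A standalone sketch of that rule with illustrative types, not HotSpot's IR nodes:

    #include <cstdint>

    static const int64_t kOffsetBot = INT64_MIN;   // stand-in for Type::OffsetBot ("unknown offset")

    struct Access {
      const void* uncast_base;   // address base with CastPP/CheckCastPP-style casts stripped
      int64_t     offset;        // byte offset from the base, or kOffsetBot if not constant
    };

    // A load may reuse a prior store's value only if both provably address the
    // same object at the same constant offset.
    static bool load_can_see_store(const Access& ld, const Access& st) {
      if (ld.uncast_base == nullptr || st.uncast_base == nullptr) return false; // base unknown
      if (ld.uncast_base != st.uncast_base) return false;  // different (or unprovably equal) objects
      if (ld.offset != st.offset) return false;            // different fields/elements
      if (ld.offset == kOffsetBot) return false;           // offsets not compile-time constants
      return true;
    }

    int main() {
      int object = 0;
      Access ld = { &object, 8 };
      Access st = { &object, 8 };
      return load_can_see_store(ld, st) ? 0 : 1;
    }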
--- a/src/hotspot/share/opto/memnode.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/memnode.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -42,6 +42,7 @@
private:
bool _unaligned_access; // Unaligned access from unsafe
bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
+ bool _unsafe_access; // Access of unsafe origin.
protected:
#ifdef ASSERT
const TypePtr* _adr_type; // What kind of memory is being addressed?
@@ -62,17 +63,17 @@
} MemOrd;
protected:
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
- : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) {
+ : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
- : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
+ : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
- : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
+ : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
}
@@ -137,6 +138,8 @@
bool is_unaligned_access() const { return _unaligned_access; }
void set_mismatched_access() { _mismatched_access = true; }
bool is_mismatched_access() const { return _mismatched_access; }
+ void set_unsafe_access() { _unsafe_access = true; }
+ bool is_unsafe_access() const { return _unsafe_access; }
#ifndef PRODUCT
static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
@@ -207,7 +210,7 @@
static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
const TypePtr* at, const Type *rt, BasicType bt,
MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false);
virtual uint hash() const; // Check the type
@@ -388,7 +391,7 @@
bool require_atomic_access() const { return _require_atomic_access; }
static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
@@ -440,7 +443,7 @@
bool require_atomic_access() const { return _require_atomic_access; }
static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
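A compact stand-in showing how the new unsafe bit travels: it sits next to the existing unaligned/mismatched bits and is threaded through the factories as a trailing default argument, so existing callers compile unchanged. Illustrative class, not MemNode itself.

    // Illustrative stand-in, not MemNode.
    class MemoryOp {
      bool _unaligned_access  = false;
      bool _mismatched_access = false;
      bool _unsafe_access     = false;   // new: access of unsafe origin
     public:
      void set_unaligned_access()   { _unaligned_access = true; }
      void set_mismatched_access()  { _mismatched_access = true; }
      void set_unsafe_access()      { _unsafe_access = true; }
      bool is_unsafe_access() const { return _unsafe_access; }

      // New flags are appended with a default of false, mirroring LoadNode::make.
      static MemoryOp* make(bool unaligned = false, bool mismatched = false, bool unsafe = false) {
        MemoryOp* op = new MemoryOp();
        if (unaligned)  op->set_unaligned_access();
        if (mismatched) op->set_mismatched_access();
        if (unsafe)     op->set_unsafe_access();
        return op;
      }
    };

    int main() {
      MemoryOp* op = MemoryOp::make(false, false, /*unsafe=*/true);
      bool ok = op->is_unsafe_access();
      delete op;
      return ok ? 0 : 1;
    }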
--- a/src/hotspot/share/opto/reg_split.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/opto/reg_split.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1192,9 +1192,8 @@
(deflrg._direct_conflict || deflrg._must_spill)) ||
// Check for LRG being up in a register and we are inside a high
// pressure area. Spill it down immediately.
- (defup && is_high_pressure(b,&deflrg,insidx))) ) {
+ (defup && is_high_pressure(b,&deflrg,insidx) && !n->is_SpillCopy())) ) {
assert( !n->rematerialize(), "" );
- assert( !n->is_SpillCopy(), "" );
// Do a split at the def site.
maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
// If it wasn't split bail
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -512,11 +512,11 @@
case JVM_CONSTANT_InvokeDynamic:
{
// Index of the bootstrap specifier in the operands array
- int old_bs_i = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i);
+ int old_bs_i = scratch_cp->bootstrap_methods_attribute_index(scratch_i);
int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p,
merge_cp_length_p, THREAD);
// The bootstrap method NameAndType_info index
- int old_ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i);
+ int old_ref_i = scratch_cp->bootstrap_name_and_type_ref_index_at(scratch_i);
int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
merge_cp_length_p, THREAD);
if (new_bs_i != old_bs_i) {
--- a/src/hotspot/share/prims/methodComparator.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/prims/methodComparator.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,17 +123,17 @@
int cpi_old = _old_cp->cache()->entry_at(cpci_old)->constant_pool_index();
int cpi_new = _new_cp->cache()->entry_at(cpci_new)->constant_pool_index();
- int bsm_old = _old_cp->invoke_dynamic_bootstrap_method_ref_index_at(cpi_old);
- int bsm_new = _new_cp->invoke_dynamic_bootstrap_method_ref_index_at(cpi_new);
+ int bsm_old = _old_cp->bootstrap_method_ref_index_at(cpi_old);
+ int bsm_new = _new_cp->bootstrap_method_ref_index_at(cpi_new);
if (!pool_constants_same(bsm_old, bsm_new))
return false;
- int cnt_old = _old_cp->invoke_dynamic_argument_count_at(cpi_old);
- int cnt_new = _new_cp->invoke_dynamic_argument_count_at(cpi_new);
+ int cnt_old = _old_cp->bootstrap_argument_count_at(cpi_old);
+ int cnt_new = _new_cp->bootstrap_argument_count_at(cpi_new);
if (cnt_old != cnt_new)
return false;
for (int arg_i = 0; arg_i < cnt_old; arg_i++) {
- int idx_old = _old_cp->invoke_dynamic_argument_index_at(cpi_old, arg_i);
- int idx_new = _new_cp->invoke_dynamic_argument_index_at(cpi_new, arg_i);
+ int idx_old = _old_cp->bootstrap_argument_index_at(cpi_old, arg_i);
+ int idx_new = _new_cp->bootstrap_argument_index_at(cpi_new, arg_i);
if (!pool_constants_same(idx_old, idx_new))
return false;
}
--- a/src/hotspot/share/prims/methodHandles.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/prims/methodHandles.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1427,7 +1427,7 @@
if (bss_index_in_pool <= 0 ||
bss_index_in_pool >= caller->constants()->length() ||
index_info->int_at(0)
- != caller->constants()->invoke_dynamic_argument_count_at(bss_index_in_pool)) {
+ != caller->constants()->bootstrap_argument_count_at(bss_index_in_pool)) {
THROW_MSG(vmSymbols::java_lang_InternalError(), "bad index info (1)");
}
objArrayHandle buf(THREAD, (objArrayOop) JNIHandles::resolve(buf_jh));
@@ -1439,7 +1439,7 @@
switch (pseudo_index) {
case -4: // bootstrap method
{
- int bsm_index = caller->constants()->invoke_dynamic_bootstrap_method_ref_index_at(bss_index_in_pool);
+ int bsm_index = caller->constants()->bootstrap_method_ref_index_at(bss_index_in_pool);
pseudo_arg = caller->constants()->resolve_possibly_cached_constant_at(bsm_index, CHECK);
break;
}
@@ -1464,7 +1464,7 @@
}
case -1: // argument count
{
- int argc = caller->constants()->invoke_dynamic_argument_count_at(bss_index_in_pool);
+ int argc = caller->constants()->bootstrap_argument_count_at(bss_index_in_pool);
jvalue argc_value; argc_value.i = (jint)argc;
pseudo_arg = java_lang_boxing_object::create(T_INT, &argc_value, CHECK);
break;
--- a/src/hotspot/share/runtime/arguments.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/arguments.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -528,6 +528,7 @@
{ "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "UseMembar", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::undefined() },
+ { "CompilationPolicyChoice", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::undefined() },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -546,6 +547,7 @@
{ "ProfileVM", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfileIntervals", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfileIntervalsTicks", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
+ { "ProfilerCheckIntervals", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfilerNumberOfInterpretedMethods", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfilerNumberOfCompiledMethods", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
{ "ProfilerNumberOfStubMethods", JDK_Version::undefined(), JDK_Version::jdk(13), JDK_Version::jdk(14) },
--- a/src/hotspot/share/runtime/globals.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/globals.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -749,9 +749,6 @@
product(bool, OmitStackTraceInFastThrow, true, \
"Omit backtraces for some 'hot' exceptions in optimized code") \
\
- notproduct(bool, ProfilerCheckIntervals, false, \
- "Collect and print information on spacing of profiler ticks") \
- \
product(bool, PrintWarnings, true, \
"Print JVM warnings to output stream") \
\
--- a/src/hotspot/share/runtime/handshake.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/handshake.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -303,13 +303,9 @@
}
bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
- // SafepointSynchronize::safepoint_safe() does not consider an externally
- // suspended thread to be safe. However, this function must be called with
- // the Threads_lock held so an externally suspended thread cannot be
- // resumed thus it is safe.
- assert(Threads_lock->owned_by_self(), "Not holding Threads_lock.");
- return SafepointSynchronize::safepoint_safe(target, target->thread_state()) ||
- target->is_ext_suspended() || target->is_terminated();
+ // handshake_safe may only be called with polls armed.
+ // VM thread controls this by first claiming the handshake via claim_handshake_for_vmthread.
+ return SafepointSynchronize::handshake_safe(target);
}
static bool possibly_vmthread_can_process_handshake(JavaThread* target) {
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -314,10 +314,10 @@
// Once we are blocked vm expects stack to be walkable
thread->frame_anchor()->make_walkable(thread);
- thread->set_thread_state((JavaThreadState)(_thread_in_vm + 1));
- InterfaceSupport::serialize_thread_state_with_handler(thread);
-
- SafepointMechanism::callback_if_safepoint(thread);
+ // All unsafe states are treated the same by the VMThread,
+ // so we can skip the _thread_in_vm_trans state here. Since
+ // we don't read the poll, it is enough to order the stores.
+ OrderAccess::storestore();
thread->set_thread_state(_thread_blocked);
@@ -325,23 +325,13 @@
}
~ThreadBlockInVMWithDeadlockCheck() {
// Change to transition state
- _thread->set_thread_state((JavaThreadState)(_thread_blocked + 1));
+ _thread->set_thread_state((JavaThreadState)(_thread_blocked_trans));
InterfaceSupport::serialize_thread_state_with_handler(_thread);
if (SafepointMechanism::should_block(_thread)) {
release_monitor();
- SafepointMechanism::callback_if_safepoint(_thread);
- // The VMThread might have read that we were in a _thread_blocked state
- // and proceeded to process a handshake for us. If that's the case then
- // we need to block.
- // By doing this we are also making the current thread process its own
- // handshake if there is one pending and the VMThread didn't try to process
- // it yet. This is more of a side-effect and not really necessary; the
- // handshake could be processed later on.
- if (_thread->has_handshake()) {
- _thread->handshake_process_by_self();
- }
+ SafepointMechanism::block_if_requested(_thread);
}
_thread->set_thread_state(_thread_in_vm);
--- a/src/hotspot/share/runtime/java.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/java.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -291,9 +291,6 @@
if (TimeOopMap) {
GenerateOopMap::print_time();
}
- if (ProfilerCheckIntervals) {
- PeriodicTask::print_intervals();
- }
if (PrintSymbolTableSizeHistogram) {
SymbolTable::print_histogram();
}
--- a/src/hotspot/share/runtime/mutex.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/mutex.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -401,15 +401,10 @@
// of m2 be less than the rank of m1.
// The rank Mutex::native is an exception in that it is not subject
// to the verification rules.
- // Here are some further notes relating to mutex acquisition anomalies:
- // . it is also ok to acquire Safepoint_lock at the very end while we
- // already hold Terminator_lock - may happen because of periodic safepoints
if (this->rank() != Mutex::native &&
this->rank() != Mutex::suspend_resume &&
locks != NULL && locks->rank() <= this->rank() &&
- !SafepointSynchronize::is_at_safepoint() &&
- !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
- SafepointSynchronize::is_synchronizing())) {
+ !SafepointSynchronize::is_at_safepoint()) {
new_owner->print_owned_locks();
fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
"possible deadlock", this->name(), this->rank(),
--- a/src/hotspot/share/runtime/mutex.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/mutex.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -56,10 +56,7 @@
// (except for "event" and "access") for the deadlock detection to work correctly.
// The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
// which being external to the VM are not subject to deadlock detection.
- // The rank safepoint is used only for synchronization in reaching a
- // safepoint and leaving a safepoint. It is only used for the Safepoint_lock
- // currently. While at a safepoint no mutexes of rank safepoint are held
- // by any thread.
+ // While at a safepoint no mutexes of rank safepoint are held by any thread.
// The rank named "leaf" is probably historical (and should
// be changed) -- mutexes of this rank aren't really leaf mutexes
// at all.
--- a/src/hotspot/share/runtime/mutexLocker.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/mutexLocker.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -72,7 +72,6 @@
Mutex* RetData_lock = NULL;
Monitor* VMOperationQueue_lock = NULL;
Monitor* VMOperationRequest_lock = NULL;
-Monitor* Safepoint_lock = NULL;
Monitor* SerializePage_lock = NULL;
Monitor* Threads_lock = NULL;
Mutex* NonJavaThreadsList_lock = NULL;
@@ -275,8 +274,6 @@
// CMS_bitMap_lock leaf 1
// CMS_freeList_lock leaf 2
- def(Safepoint_lock , PaddedMonitor, safepoint, true, Monitor::_safepoint_check_sometimes); // locks SnippetCache_lock/Threads_lock
-
def(Threads_lock , PaddedMonitor, barrier, true, Monitor::_safepoint_check_sometimes);
def(NonJavaThreadsList_lock , PaddedMutex, leaf, true, Monitor::_safepoint_check_never);
--- a/src/hotspot/share/runtime/mutexLocker.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/mutexLocker.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -68,7 +68,6 @@
extern Monitor* CGCPhaseManager_lock; // a lock to protect a concurrent GC's phase management
extern Monitor* VMOperationQueue_lock; // a lock on queue of vm_operations waiting to execute
extern Monitor* VMOperationRequest_lock; // a lock on Threads waiting for a vm_operation to terminate
-extern Monitor* Safepoint_lock; // a lock used by the safepoint abstraction
extern Monitor* Threads_lock; // a lock on the Threads table of active Java threads
// (also used by Safepoints too to block threads creation/destruction)
extern Mutex* NonJavaThreadsList_lock; // a lock on the NonJavaThreads list
--- a/src/hotspot/share/runtime/safepoint.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/safepoint.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -70,70 +70,63 @@
#include "c1/c1_globals.hpp"
#endif
-template <typename E>
-static void set_current_safepoint_id(E* event, int adjustment = 0) {
- assert(event != NULL, "invariant");
- event->set_safepointId(SafepointSynchronize::safepoint_counter() + adjustment);
-}
-
-static void post_safepoint_begin_event(EventSafepointBegin* event,
+static void post_safepoint_begin_event(EventSafepointBegin& event,
+ uint64_t safepoint_id,
int thread_count,
int critical_thread_count) {
- assert(event != NULL, "invariant");
- assert(event->should_commit(), "invariant");
- set_current_safepoint_id(event);
- event->set_totalThreadCount(thread_count);
- event->set_jniCriticalThreadCount(critical_thread_count);
- event->commit();
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.set_totalThreadCount(thread_count);
+ event.set_jniCriticalThreadCount(critical_thread_count);
+ event.commit();
+ }
}
-static void post_safepoint_cleanup_event(EventSafepointCleanup* event) {
- assert(event != NULL, "invariant");
- assert(event->should_commit(), "invariant");
- set_current_safepoint_id(event);
- event->commit();
-}
-
-static void post_safepoint_synchronize_event(EventSafepointStateSynchronization* event,
- int initial_number_of_threads,
- int threads_waiting_to_block,
- unsigned int iterations) {
- assert(event != NULL, "invariant");
- if (event->should_commit()) {
- // Group this event together with the ones committed after the counter is increased
- set_current_safepoint_id(event, 1);
- event->set_initialThreadCount(initial_number_of_threads);
- event->set_runningThreadCount(threads_waiting_to_block);
- event->set_iterations(iterations);
- event->commit();
+static void post_safepoint_cleanup_event(EventSafepointCleanup& event, uint64_t safepoint_id) {
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.commit();
}
}
-static void post_safepoint_wait_blocked_event(EventSafepointWaitBlocked* event,
- int initial_threads_waiting_to_block) {
- assert(event != NULL, "invariant");
- assert(event->should_commit(), "invariant");
- set_current_safepoint_id(event);
- event->set_runningThreadCount(initial_threads_waiting_to_block);
- event->commit();
-}
-
-static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask* event,
- const char* name) {
- assert(event != NULL, "invariant");
- if (event->should_commit()) {
- set_current_safepoint_id(event);
- event->set_name(name);
- event->commit();
+static void post_safepoint_synchronize_event(EventSafepointStateSynchronization& event,
+ uint64_t safepoint_id,
+ int initial_number_of_threads,
+ int threads_waiting_to_block,
+ uint64_t iterations) {
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.set_initialThreadCount(initial_number_of_threads);
+ event.set_runningThreadCount(threads_waiting_to_block);
+ event.set_iterations(iterations);
+ event.commit();
}
}
-static void post_safepoint_end_event(EventSafepointEnd* event) {
- assert(event != NULL, "invariant");
- if (event->should_commit()) {
- // Group this event together with the ones committed before the counter increased
- set_current_safepoint_id(event, -1);
- event->commit();
+static void post_safepoint_wait_blocked_event(EventSafepointWaitBlocked& event,
+ uint64_t safepoint_id,
+ int initial_threads_waiting_to_block) {
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.set_runningThreadCount(initial_threads_waiting_to_block);
+ event.commit();
+ }
+}
+
+static void post_safepoint_cleanup_task_event(EventSafepointCleanupTask& event,
+ uint64_t safepoint_id,
+ const char* name) {
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.set_name(name);
+ event.commit();
+ }
+}
+
+static void post_safepoint_end_event(EventSafepointEnd& event, uint64_t safepoint_id) {
+ if (event.should_commit()) {
+ event.set_safepointId(safepoint_id);
+ event.commit();
}
}
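The event helpers now take the event by reference plus an explicit safepoint id and guard on should_commit() themselves. A hypothetical call-site fragment showing that shape; where the id is snapshotted and the thread counts come from are assumptions here, not something this hunk shows.

    // Hypothetical call-site fragment (not patched code): the id is captured once
    // and handed to every helper, so all events of one safepoint share it.
    EventSafepointBegin begin_event;
    uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();  // assumed capture point
    int nof_threads  = Threads::number_of_threads();
    int nof_critical = 0;  // assumed placeholder for the JNI-critical count
    // ... bring all JavaThreads to the safepoint ...
    post_safepoint_begin_event(begin_event, safepoint_id, nof_threads, nof_critical);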
@@ -141,64 +134,170 @@
// Implementation of Safepoint begin/end
SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
-volatile int SafepointSynchronize::_waiting_to_block = 0;
+int SafepointSynchronize::_waiting_to_block = 0;
volatile uint64_t SafepointSynchronize::_safepoint_counter = 0;
+const uint64_t SafepointSynchronize::InactiveSafepointCounter = 0;
int SafepointSynchronize::_current_jni_active_count = 0;
-long SafepointSynchronize::_end_of_last_safepoint = 0;
-int SafepointSynchronize::_defer_thr_suspend_loop_count = 4000;
-static const int safepoint_spin_before_yield = 2000;
-static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE
-static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only
+long SafepointSynchronize::_end_of_last_safepoint = 0;
+
+WaitBarrier* SafepointSynchronize::_wait_barrier;
+
+// We need a place to save the description since it is released before we need it.
+static char stopped_description[64] = "";
+static bool _vm_is_waiting = false;
+
+static volatile bool PageArmed = false; // safepoint polling page is RO|RW vs PROT_NONE
static bool timeout_error_printed = false;
-
-// Statistic related statics
+// Statistic related
julong SafepointSynchronize::_coalesced_vmop_count = 0;
static jlong _safepoint_begin_time = 0;
static float _ts_of_current_safepoint = 0.0f;
static volatile int _nof_threads_hit_polling_page = 0;
-// Roll all threads forward to a safepoint and suspend them all
-void SafepointSynchronize::begin() {
- EventSafepointBegin begin_event;
- Thread* myThread = Thread::current();
- assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
+void SafepointSynchronize::init(Thread* vmthread) {
+ // WaitBarrier should never be destroyed since we will have
+ // threads waiting on it while exiting.
+ _wait_barrier = new WaitBarrier(vmthread);
+}
+
+void SafepointSynchronize::increment_jni_active_count() {
+ assert(Thread::current()->is_VM_thread(), "Only VM thread may increment");
+ ++_current_jni_active_count;
+}
+
+void SafepointSynchronize::decrement_waiting_to_block() {
+ assert(_waiting_to_block > 0, "sanity check");
+ assert(Thread::current()->is_VM_thread(), "Only VM thread may decrement");
+ --_waiting_to_block;
+}
+
+static bool thread_not_running(ThreadSafepointState *cur_state) {
+ if (!cur_state->is_running()) {
+ return true;
+ }
+ cur_state->examine_state_of_thread(SafepointSynchronize::safepoint_counter());
+ if (!cur_state->is_running()) {
+ return true;
+ }
+ LogTarget(Trace, safepoint) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ cur_state->print_on(&ls);
+ }
+ return false;
+}
+#ifdef ASSERT
+static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) {
+ int a = 0;
+ const ThreadSafepointState *tmp_tss = tss_head;
+ while (tmp_tss != NULL) {
+ ++a;
+ assert(tmp_tss->is_running(), "Illegal initial state");
+ tmp_tss = tmp_tss->get_next();
+ }
+ assert(a == still_running, "Must be the same");
+}
+#endif // ASSERT
+
+static void back_off(int iteration) {
+ // iteration will be 1 the first time we enter this spin back-off.
+ // naked_short_nanosleep takes tenths of micros, which means the exact
+ // number of nanoseconds is irrelevant below that granularity. We do
+ // 20 sleeps of 1 ns each, with a total cost of ~1 ms, then we switch to 1 ms sleeps.
+ jlong sleep_ns = 1;
+ if (iteration > 20) {
+ sleep_ns = NANOUNITS / MILLIUNITS; // 1 ms
+ }
+ os::naked_short_nanosleep(sleep_ns);
+}
+
+int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running)
+{
+ JavaThreadIteratorWithHandle jtiwh;
+
+#ifdef ASSERT
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ assert(cur->safepoint_state()->is_running(), "Illegal initial state");
+ }
+ jtiwh.rewind();
+#endif // ASSERT
+
+ // Iterate through all threads until it has been determined how to stop them all at a safepoint.
+ int still_running = nof_threads;
+ ThreadSafepointState *tss_head = NULL;
+ ThreadSafepointState **p_prev = &tss_head;
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ ThreadSafepointState *cur_tss = cur->safepoint_state();
+ assert(cur_tss->get_next() == NULL, "Must be NULL");
+ if (thread_not_running(cur_tss)) {
+ --still_running;
+ } else {
+ *p_prev = cur_tss;
+ p_prev = cur_tss->next_ptr();
+ }
+ }
+ *p_prev = NULL;
+
+ DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
+
+ *initial_running = still_running;
if (log_is_enabled(Debug, safepoint, stats)) {
- _safepoint_begin_time = os::javaTimeNanos();
- _ts_of_current_safepoint = tty->time_stamp().seconds();
- _nof_threads_hit_polling_page = 0;
+ begin_statistics(nof_threads, still_running);
}
- Universe::heap()->safepoint_synchronize_begin();
-
- // By getting the Threads_lock, we assure that no threads are about to start or
- // exit. It is released again in SafepointSynchronize::end().
- Threads_lock->lock();
+ int iterations = 1; // The first iteration is above.
- assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
-
- int nof_threads = Threads::number_of_threads();
-
- log_debug(safepoint)("Safepoint synchronization initiated. (%d threads)", nof_threads);
-
- RuntimeService::record_safepoint_begin();
+ while (still_running > 0) {
+ // Check if this has taken too long:
+ if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
+ print_safepoint_timeout(_spinning_timeout);
+ }
+ if (int(iterations) == -1) { // overflow - something is wrong.
+ // We can only overflow here when we are using global
+ // polling pages. We keep this guarantee in its original
+ // form so that searches of the bug database for this
+ // failure mode find the right bugs.
+ guarantee (!PageArmed, "invariant");
+ }
- MutexLocker mu(Safepoint_lock);
-
- // Reset the count of active JNI critical threads
- _current_jni_active_count = 0;
+ p_prev = &tss_head;
+ ThreadSafepointState *cur_tss = tss_head;
+ while (cur_tss != NULL) {
+ assert(cur_tss->is_running(), "Illegal initial state");
+ if (thread_not_running(cur_tss)) {
+ --still_running;
+ *p_prev = NULL;
+ ThreadSafepointState *tmp = cur_tss;
+ cur_tss = cur_tss->get_next();
+ tmp->set_next(NULL);
+ } else {
+ *p_prev = cur_tss;
+ p_prev = cur_tss->next_ptr();
+ cur_tss = cur_tss->get_next();
+ }
+ }
- // Set number of threads to wait for, before we initiate the callbacks
- _waiting_to_block = nof_threads;
- TryingToBlock = 0 ;
- int still_running = nof_threads;
+ DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
+
+ if (still_running > 0) {
+ back_off(iterations);
+ }
+
+ iterations++;
+ }
- // Save the starting time, so that it can be compared to see if this has taken
- // too long to complete.
- jlong safepoint_limit_time = 0;
- timeout_error_printed = false;
+ assert(tss_head == NULL, "Must be empty");
+ if (log_is_enabled(Debug, safepoint, stats)) {
+ update_statistics_on_spin_end();
+ }
+ return iterations;
+}
+
+void SafepointSynchronize::arm_safepoint() {
// Begin the process of bringing the system to a safepoint.
// Java threads can be in several different states and are
// stopped by different mechanisms:
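synchronize_threads() chains the still-running ThreadSafepointStates into an intrusive singly linked list and prunes it in place on every iteration, so threads that have already stopped are never revisited. A self-contained sketch of that pruning idiom follows; the node type is illustrative and the loop is a compact variant of the one in the patch, which re-links explicitly in both branches to the same effect.

    // Illustrative node type, not ThreadSafepointState.
    struct SpinNode {
      bool       running;
      SpinNode*  next;
      SpinNode** next_ptr() { return &next; }
    };

    // Unlink every node that is no longer running, keep the rest chained,
    // and return how many remain.
    static int prune_not_running(SpinNode** head) {
      int still_running = 0;
      SpinNode** p_prev = head;
      SpinNode*  cur    = *head;
      while (cur != nullptr) {
        if (!cur->running) {
          *p_prev = cur->next;        // predecessor now skips over cur
          SpinNode* tmp = cur;
          cur = cur->next;
          tmp->next = nullptr;        // detach the removed node
        } else {
          ++still_running;
          p_prev = cur->next_ptr();   // advance the "slot to patch" pointer
          cur = cur->next;
        }
      }
      *p_prev = nullptr;              // terminate whatever remains
      return still_running;
    }

    int main() {
      SpinNode c = { true,  nullptr };
      SpinNode b = { false, &c };
      SpinNode a = { true,  &b };
      SpinNode* head = &a;
      return prune_not_running(&head) == 2 ? 0 : 1;   // b is unlinked; a and c remain
    }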
@@ -216,7 +315,7 @@
// memory writes are serialized with respect to each other,
// the VM thread issues a memory barrier instruction.
// 3. Running compiled Code
- // Compiled code reads a global (Safepoint Polling) page that
+ // Compiled code reads the local polling page that
// is set to fault if we are trying to get to a safepoint.
// 4. Blocked
// A thread which is blocked will not be allowed to return from the
@@ -226,275 +325,154 @@
// between states, the safepointing code will wait for the thread to
// block itself when it attempts transitions to a new state.
//
- {
- EventSafepointStateSynchronization sync_event;
- int initial_running = 0;
-
- _state = _synchronizing;
-
- if (SafepointMechanism::uses_thread_local_poll()) {
- // Arming the per thread poll while having _state != _not_synchronized means safepointing
- log_trace(safepoint)("Setting thread local yield flag for threads");
- OrderAccess::storestore(); // storestore, global state -> local state
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
- // Make sure the threads start polling, it is time to yield.
- SafepointMechanism::arm_local_poll(cur);
- }
- }
- OrderAccess::fence(); // storestore|storeload, global state -> local state
-
- if (SafepointMechanism::uses_global_page_poll()) {
- // Make interpreter safepoint aware
- Interpreter::notice_safepoints();
-
- // Make polling safepoint aware
- guarantee (PageArmed == 0, "invariant") ;
- PageArmed = 1 ;
- os::make_polling_page_unreadable();
- }
-
- // Consider using active_processor_count() ... but that call is expensive.
- int ncpus = os::processor_count() ;
- unsigned int iterations = 0;
-
- {
- JavaThreadIteratorWithHandle jtiwh;
-#ifdef ASSERT
- for (; JavaThread *cur = jtiwh.next(); ) {
- assert(cur->safepoint_state()->is_running(), "Illegal initial state");
- // Clear the visited flag to ensure that the critical counts are collected properly.
- cur->set_visited_for_critical_count(false);
- }
-#endif // ASSERT
- if (SafepointTimeout)
- safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+  // We must never miss a thread with the correct safepoint id, so we make sure to
+  // arm the wait barrier for the next safepoint id/counter.
+  // Arming must be done after resetting _current_jni_active_count and _waiting_to_block.
+ _wait_barrier->arm(static_cast<int>(_safepoint_counter + 1));
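+  // Note: _safepoint_counter + 1 is the (odd) id this safepoint gets below, so
+  // blocking JavaThreads wait on exactly the id they observe in SS::block().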
- // Iterate through all threads until it have been determined how to stop them all at a safepoint
- int steps = 0 ;
- while(still_running > 0) {
- jtiwh.rewind();
- for (; JavaThread *cur = jtiwh.next(); ) {
- assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
- ThreadSafepointState *cur_state = cur->safepoint_state();
- if (cur_state->is_running()) {
- cur_state->examine_state_of_thread();
- if (!cur_state->is_running()) {
- still_running--;
- // consider adjusting steps downward:
- // steps = 0
- // steps -= NNN
- // steps >>= 1
- // steps = MIN(steps, 2000-100)
- // if (iterations != 0) steps -= NNN
- }
- LogTarget(Trace, safepoint) lt;
- if (lt.is_enabled()) {
- ResourceMark rm;
- LogStream ls(lt);
- cur_state->print_on(&ls);
- }
- }
- }
-
- if (iterations == 0) {
- initial_running = still_running;
- if (log_is_enabled(Debug, safepoint, stats)) {
- begin_statistics(nof_threads, still_running);
- }
- }
-
- if (still_running > 0) {
- // Check for if it takes to long
- if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
- print_safepoint_timeout(_spinning_timeout);
- }
+ assert((_safepoint_counter & 0x1) == 0, "must be even");
+ // The store to _safepoint_counter must happen after any stores in arming.
+ OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
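+  // The counter is now odd, marking a safepoint as in progress; it is bumped
+  // back to an even (dormant) value in disarm_safepoint().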
- // Spin to avoid context switching.
- // There's a tension between allowing the mutators to run (and rendezvous)
- // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
- // a mutator might otherwise use profitably to reach a safepoint. Excessive
- // spinning by the VM thread on a saturated system can increase rendezvous latency.
- // Blocking or yielding incur their own penalties in the form of context switching
- // and the resultant loss of $ residency.
- //
- // Further complicating matters is that yield() does not work as naively expected
- // on many platforms -- yield() does not guarantee that any other ready threads
- // will run. As such we revert to naked_short_sleep() after some number of iterations.
- // nakes_short_sleep() is implemented as a short unconditional sleep.
- // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
- // can actually increase the time it takes the VM thread to detect that a system-wide
- // stop-the-world safepoint has been reached. In a pathological scenario such as that
- // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
- // In that case the mutators will be stalled waiting for the safepoint to complete and the
- // the VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
- // will eventually wake up and detect that all mutators are safe, at which point
- // we'll again make progress.
- //
- // Beware too that that the VMThread typically runs at elevated priority.
- // Its default priority is higher than the default mutator priority.
- // Obviously, this complicates spinning.
- //
- // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
- // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
- //
- // See the comments in synchronizer.cpp for additional remarks on spinning.
- //
- // In the future we might:
- // -- Modify the safepoint scheme to avoid potentially unbounded spinning.
- // This is tricky as the path used by a thread exiting the JVM (say on
- // on JNI call-out) simply stores into its state field. The burden
- // is placed on the VM thread, which must poll (spin).
- // -- Find something useful to do while spinning. If the safepoint is GC-related
- // we might aggressively scan the stacks of threads that are already safe.
- // -- YieldTo() any still-running mutators that are ready but OFFPROC.
- // -- Check system saturation. If the system is not fully saturated then
- // simply spin and avoid sleep/yield.
- // -- As still-running mutators rendezvous they could unpark the sleeping
- // VMthread. This works well for still-running mutators that become
- // safe. The VMthread must still poll for mutators that call-out.
- // -- Drive the policy on time-since-begin instead of iterations.
- // -- Consider making the spin duration a function of the # of CPUs:
- // Spin = (((ncpus-1) * M) + K) + F(still_running)
- // Alternately, instead of counting iterations of the outer loop
- // we could count the # of threads visited in the inner loop, above.
- // -- On windows consider using the return value from SwitchThreadTo()
- // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
+ // We are synchronizing
+ OrderAccess::storestore(); // Ordered with _safepoint_counter
+ _state = _synchronizing;
- if (int(iterations) == -1) { // overflow - something is wrong.
- // We can only overflow here when we are using global
- // polling pages. We keep this guarantee in its original
- // form so that searches of the bug database for this
- // failure mode find the right bugs.
- guarantee (PageArmed == 0, "invariant");
- }
-
- // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
- // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
- ++steps ;
- if (ncpus > 1 && steps < safepoint_spin_before_yield) {
- SpinPause() ; // MP-Polite spin
- } else
- if (steps < _defer_thr_suspend_loop_count) {
- os::naked_yield() ;
- } else {
- os::naked_short_sleep(1);
- }
-
- iterations ++ ;
- }
- assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
- }
- } // ThreadsListHandle destroyed here.
- assert(still_running == 0, "sanity check");
-
- if (log_is_enabled(Debug, safepoint, stats)) {
- update_statistics_on_spin_end();
- }
- if (sync_event.should_commit()) {
- post_safepoint_synchronize_event(&sync_event, initial_running, _waiting_to_block, iterations);
+ if (SafepointMechanism::uses_thread_local_poll()) {
+ // Arming the per thread poll while having _state != _not_synchronized means safepointing
+ log_trace(safepoint)("Setting thread local yield flag for threads");
+ OrderAccess::storestore(); // storestore, global state -> local state
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
+ // Make sure the threads start polling, it is time to yield.
+ SafepointMechanism::arm_local_poll(cur);
}
}
+ OrderAccess::fence(); // storestore|storeload, global state -> local state
- // wait until all threads are stopped
- {
- EventSafepointWaitBlocked wait_blocked_event;
- int initial_waiting_to_block = _waiting_to_block;
+ if (SafepointMechanism::uses_global_page_poll()) {
+ // Make interpreter safepoint aware
+ Interpreter::notice_safepoints();
+
+ // Make polling safepoint aware
+ guarantee (!PageArmed, "invariant") ;
+ PageArmed = true;
+ os::make_polling_page_unreadable();
+ }
+}
+
+// Roll all threads forward to a safepoint and suspend them all
+void SafepointSynchronize::begin() {
+ EventSafepointBegin begin_event;
+ assert(Thread::current()->is_VM_thread(), "Only VM thread may execute a safepoint");
+
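+  // Copy the VM operation description now; it is still used for logging after
+  // the safepoint has ended (see record_safepoint_epilog), when the original
+  // string may no longer be available.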
+ strncpy(stopped_description, VMThread::vm_safepoint_description(), sizeof(stopped_description) - 1);
+ stopped_description[sizeof(stopped_description) - 1] = '\0';
+
+ if (log_is_enabled(Debug, safepoint, stats)) {
+ _safepoint_begin_time = os::javaTimeNanos();
+ _ts_of_current_safepoint = tty->time_stamp().seconds();
+ _nof_threads_hit_polling_page = 0;
+ }
+
+ Universe::heap()->safepoint_synchronize_begin();
+
+ // By getting the Threads_lock, we assure that no threads are about to start or
+ // exit. It is released again in SafepointSynchronize::end().
+ Threads_lock->lock();
+
+ assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
- while (_waiting_to_block > 0) {
- log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
- if (!SafepointTimeout || timeout_error_printed) {
- Safepoint_lock->wait(true); // true, means with no safepoint checks
- } else {
- // Compute remaining time
- jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+ int nof_threads = Threads::number_of_threads();
+
+ log_debug(safepoint)("Safepoint synchronization initiated using %s wait barrier. (%d threads)", _wait_barrier->description(), nof_threads);
+
+ RuntimeService::record_safepoint_begin();
+
+ // Reset the count of active JNI critical threads
+ _current_jni_active_count = 0;
+
+ // Set number of threads to wait for
+ _waiting_to_block = nof_threads;
- // If there is no remaining time, then there is an error
- if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
- print_safepoint_timeout(_blocking_timeout);
- }
- }
- }
- assert(_waiting_to_block == 0, "sanity check");
+ jlong safepoint_limit_time = 0;
+ if (SafepointTimeout) {
+ // Set the limit time, so that it can be compared to see if this has taken
+ // too long to complete.
+ safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+ }
+ timeout_error_printed = false;
+
+ EventSafepointStateSynchronization sync_event;
+ int initial_running = 0;
+
+ // Arms the safepoint, _current_jni_active_count and _waiting_to_block must be set before.
+ arm_safepoint();
+
+ // Will spin until all threads are safe.
+ int iterations = synchronize_threads(safepoint_limit_time, nof_threads, &initial_running);
+ assert(_waiting_to_block == 0, "No thread should be running");
+
+ post_safepoint_synchronize_event(sync_event, _safepoint_counter, initial_running,
+ _waiting_to_block, iterations);
+
+  // Construct the wait-blocked event now so its timing starts from this point.
+ EventSafepointWaitBlocked wait_blocked_event;
#ifndef PRODUCT
- if (SafepointTimeout) {
- jlong current_time = os::javaTimeNanos();
- if (safepoint_limit_time < current_time) {
- log_warning(safepoint)("# SafepointSynchronize: Finished after "
- INT64_FORMAT_W(6) " ms",
- (int64_t)((current_time - safepoint_limit_time) / MICROUNITS +
- (jlong)SafepointTimeoutDelay));
- }
+ if (SafepointTimeout) {
+ jlong current_time = os::javaTimeNanos();
+ if (safepoint_limit_time < current_time) {
+ log_warning(safepoint)("# SafepointSynchronize: Finished after "
+ INT64_FORMAT_W(6) " ms",
+ (int64_t)((current_time - safepoint_limit_time) / MICROUNITS +
+ (jlong)SafepointTimeoutDelay));
}
+ }
#endif
- assert((_safepoint_counter & 0x1) == 0, "must be even");
- assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
- _safepoint_counter ++;
+ assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
- // Record state
- _state = _synchronized;
+ // Record state
+ _state = _synchronized;
- OrderAccess::fence();
- if (wait_blocked_event.should_commit()) {
- post_safepoint_wait_blocked_event(&wait_blocked_event, initial_waiting_to_block);
- }
- }
+ OrderAccess::fence();
+
+ post_safepoint_wait_blocked_event(wait_blocked_event, _safepoint_counter, 0);
#ifdef ASSERT
// Make sure all the threads were visited.
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
- assert(cur->was_visited_for_critical_count(), "missed a thread");
+ assert(cur->was_visited_for_critical_count(_safepoint_counter), "missed a thread");
}
#endif // ASSERT
// Update the count of active JNI critical regions
GCLocker::set_jni_lock_count(_current_jni_active_count);
- log_info(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description());
+ log_info(safepoint)("Entering safepoint region: %s", stopped_description);
RuntimeService::record_safepoint_synchronized();
if (log_is_enabled(Debug, safepoint, stats)) {
update_statistics_on_sync_end(os::javaTimeNanos());
}
- // Call stuff that needs to be run when a safepoint is just about to be completed
- {
- EventSafepointCleanup cleanup_event;
- do_cleanup_tasks();
- if (cleanup_event.should_commit()) {
- post_safepoint_cleanup_event(&cleanup_event);
- }
- }
+ // We do the safepoint cleanup first since a GC related safepoint
+ // needs cleanup to be completed before running the GC op.
+ EventSafepointCleanup cleanup_event;
+ do_cleanup_tasks();
+ post_safepoint_cleanup_event(cleanup_event, _safepoint_counter);
if (log_is_enabled(Debug, safepoint, stats)) {
// Record how much time spend on the above cleanup tasks
update_statistics_on_cleanup_end(os::javaTimeNanos());
}
- if (begin_event.should_commit()) {
- post_safepoint_begin_event(&begin_event, nof_threads, _current_jni_active_count);
- }
+ post_safepoint_begin_event(begin_event, _safepoint_counter, nof_threads, _current_jni_active_count);
}
-// Wake up all threads, so they are ready to resume execution after the safepoint
-// operation has been carried out
-void SafepointSynchronize::end() {
- assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
- assert((_safepoint_counter & 0x1) == 1, "must be odd");
- EventSafepointEnd event;
- _safepoint_counter ++;
- // memory fence isn't required here since an odd _safepoint_counter
- // value can do no harm and a fence is issued below anyway.
-
- DEBUG_ONLY(Thread* myThread = Thread::current();)
- assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
-
- if (log_is_enabled(Debug, safepoint, stats)) {
- end_statistics(os::javaTimeNanos());
- }
-
+void SafepointSynchronize::disarm_safepoint() {
+ uint64_t safepoint_id = _safepoint_counter;
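+  // Capture the id of the safepoint being disarmed (still odd at this point);
+  // it is used below to reset the debug-only visited-for-critical-count marks.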
{
JavaThreadIteratorWithHandle jtiwh;
#ifdef ASSERT
@@ -508,66 +486,74 @@
}
#endif // ASSERT
- if (PageArmed) {
- assert(SafepointMechanism::uses_global_page_poll(), "sanity");
+ if (SafepointMechanism::uses_global_page_poll()) {
+ guarantee (PageArmed, "invariant");
// Make polling safepoint aware
os::make_polling_page_readable();
- PageArmed = 0 ;
- }
-
- if (SafepointMechanism::uses_global_page_poll()) {
+ PageArmed = false;
// Remove safepoint check from interpreter
Interpreter::ignore_safepoints();
}
- {
- MutexLocker mu(Safepoint_lock);
+ OrderAccess::fence(); // keep read and write of _state from floating up
+ assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
+
+ // Change state first to _not_synchronized.
+ // No threads should see _synchronized when running.
+ _state = _not_synchronized;
+
+ // Set the next dormant (even) safepoint id.
+ assert((_safepoint_counter & 0x1) == 1, "must be odd");
+ OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
- assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
+ OrderAccess::fence(); // Keep the local state from floating up.
+
+ jtiwh.rewind();
+ for (; JavaThread *current = jtiwh.next(); ) {
+ // Clear the visited flag to ensure that the critical counts are collected properly.
+ DEBUG_ONLY(current->reset_visited_for_critical_count(safepoint_id);)
+ ThreadSafepointState* cur_state = current->safepoint_state();
+ assert(!cur_state->is_running(), "Thread not suspended at safepoint");
+ cur_state->restart(); // TSS _running
+ assert(cur_state->is_running(), "safepoint state has not been reset");
+ SafepointMechanism::disarm_local_poll(current);
+ }
+ } // ~JavaThreadIteratorWithHandle
- if (SafepointMechanism::uses_thread_local_poll()) {
- _state = _not_synchronized;
- OrderAccess::storestore(); // global state -> local state
- jtiwh.rewind();
- for (; JavaThread *current = jtiwh.next(); ) {
- ThreadSafepointState* cur_state = current->safepoint_state();
- cur_state->restart(); // TSS _running
- SafepointMechanism::disarm_local_poll(current);
- }
- log_info(safepoint)("Leaving safepoint region");
- } else {
- // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
- // when they get restarted.
- _state = _not_synchronized;
- OrderAccess::fence();
+ log_info(safepoint)("Leaving safepoint region");
+
+ RuntimeService::record_safepoint_end();
- log_info(safepoint)("Leaving safepoint region");
+ // Release threads lock, so threads can be created/destroyed again.
+ Threads_lock->unlock();
+
+ // Wake threads after local state is correctly set.
+ _wait_barrier->disarm();
+}
- // Start suspended threads
- jtiwh.rewind();
- for (; JavaThread *current = jtiwh.next(); ) {
- ThreadSafepointState* cur_state = current->safepoint_state();
- assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
- cur_state->restart();
- assert(cur_state->is_running(), "safepoint state has not been reset");
- }
- }
+// Wake up all threads, so they are ready to resume execution after the safepoint
+// operation has been carried out
+void SafepointSynchronize::end() {
+ assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+ EventSafepointEnd event;
+ uint64_t safepoint_id = _safepoint_counter;
+ assert(Thread::current()->is_VM_thread(), "Only VM thread can execute a safepoint");
- RuntimeService::record_safepoint_end();
+ if (log_is_enabled(Debug, safepoint, stats)) {
+ end_statistics(os::javaTimeNanos());
+ }
- // Release threads lock, so threads can be created/destroyed again.
- // It will also release all threads blocked in signal_thread_blocked.
- Threads_lock->unlock();
- }
- } // ThreadsListHandle destroyed here.
+ disarm_safepoint();
+
+ RuntimeService::record_safepoint_epilog(stopped_description);
Universe::heap()->safepoint_synchronize_end();
+
// record this time so VMThread can keep track how much time has elapsed
// since last safepoint.
_end_of_last_safepoint = os::javaTimeMillis();
- if (event.should_commit()) {
- post_safepoint_end_event(&event);
- }
+
+ post_safepoint_end_event(event, safepoint_id);
}
bool SafepointSynchronize::is_cleanup_needed() {
@@ -613,6 +599,7 @@
_counters(counters) {}
void work(uint worker_id) {
+ uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();
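+    // Sample the safepoint id once; it tags every cleanup task event posted below.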
// All threads deflate monitors and mark nmethods (if necessary).
Threads::possibly_parallel_threads_do(true, &_cleanup_threads_cl);
@@ -621,9 +608,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
ObjectSynchronizer::deflate_idle_monitors(_counters);
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
@@ -631,9 +617,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
InlineCacheBuffer::update_inline_caches();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
@@ -641,9 +626,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
CompilationPolicy::policy()->do_safepoint_work();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
@@ -652,9 +636,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
SymbolTable::rehash_table();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
}
@@ -664,9 +647,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
StringTable::rehash_table();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
}
@@ -677,9 +659,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
ClassLoaderDataGraph::purge_if_needed();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
@@ -687,9 +668,8 @@
EventSafepointCleanupTask event;
TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
ClassLoaderDataGraph::resize_if_needed();
- if (event.should_commit()) {
- post_safepoint_cleanup_task_event(&event, name);
- }
+
+ post_safepoint_cleanup_task_event(event, safepoint_id, name);
}
_subtasks.all_tasks_completed(_num_workers);
@@ -736,15 +716,48 @@
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
}
+// Methods for determining if a JavaThread is safepoint safe.
-bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
+// Returning false means the state could not be determined and is therefore unsafe.
+// Returning true means a state was determined, but it may still be an unsafe state.
+// If called from a non-safepoint context, safepoint_count MUST be InactiveSafepointCounter.
+bool SafepointSynchronize::try_stable_load_state(JavaThreadState *state, JavaThread *thread, uint64_t safepoint_count) {
+ assert((safepoint_count != InactiveSafepointCounter &&
+ Thread::current() == (Thread*)VMThread::vm_thread() &&
+ SafepointSynchronize::_state != _not_synchronized)
+ || safepoint_count == InactiveSafepointCounter, "Invalid check");
+
+  // To handle a thread that is still in thread_blocked on the backedge of the
+  // WaitBarrier from a previous safepoint, where we may read the reset value
+  // (0/InactiveSafepointCounter), we re-read the thread state after we read the
+  // thread's safepoint id. The JavaThread changes its thread state from
+  // thread_blocked before resetting the safepoint id to 0. This guarantees that
+  // the second read sees an updated thread state: it is either a different
+  // state, which makes this an unsafe state, or thread_blocked again. When we
+  // see blocked twice with a 0 safepoint id, either:
+ // - It is normally blocked, e.g. on Mutex, TBIVM.
+ // - It was in SS:block(), looped around to SS:block() and is blocked on the WaitBarrier.
+ // - It was in SS:block() but now on a Mutex.
+ // All of these cases are safe.
+
+ *state = thread->thread_state();
+ OrderAccess::loadload();
+ uint64_t sid = thread->safepoint_state()->get_safepoint_id(); // Load acquire
+ if (sid != InactiveSafepointCounter && sid != safepoint_count) {
+ // In an old safepoint, state not relevant.
+ return false;
+ }
+ return *state == thread->thread_state();
+}
+
+static bool safepoint_safe_with(JavaThread *thread, JavaThreadState state) {
switch(state) {
case _thread_in_native:
// native threads are safe if they have no java stack or have walkable stack
return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
- // blocked threads should have already have walkable stack
case _thread_blocked:
+      // Blocked on the wait barrier or otherwise blocked.
+      // Blocked threads should already have a walkable stack.
assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
return true;
@@ -753,12 +766,28 @@
}
}
+bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
+  // The polls must be armed; otherwise the safe state can change to unsafe at any time.
+ assert(SafepointMechanism::should_block(thread), "Must be armed");
+  // This function must be called with the Threads_lock held, so an externally
+  // suspended thread cannot be resumed and is therefore safe.
+ assert(Threads_lock->owned_by_self() && Thread::current()->is_VM_thread(),
+ "Must hold Threads_lock and be VMThread");
+ if (thread->is_ext_suspended() || thread->is_terminated()) {
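+    // No stable-state check is needed here: neither a terminated thread nor an
+    // externally suspended one can start executing Java code under the Threads_lock.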
+ return true;
+ }
+ JavaThreadState stable_state;
+ if (try_stable_load_state(&stable_state, thread, InactiveSafepointCounter)) {
+ return safepoint_safe_with(thread, stable_state);
+ }
+ return false;
+}
// See if the thread is running inside a lazy critical native and
// update the thread critical count if so. Also set a suspend flag to
// cause the native wrapper to return into the JVM to do the unlock
// once the native finishes.
-void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
+static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
if (state == _thread_in_native &&
thread->has_last_Java_frame() &&
thread->frame_anchor()->walkable()) {
@@ -788,12 +817,10 @@
}
}
-
+// -------------------------------------------------------------------------------------------------------
+// Implementation of Safepoint blocking point
-// -------------------------------------------------------------------------------------------------------
-// Implementation of Safepoint callback point
-
-void SafepointSynchronize::block(JavaThread *thread, bool block_in_safepoint_check) {
+void SafepointSynchronize::block(JavaThread *thread) {
assert(thread != NULL, "thread must be set");
assert(thread->is_Java_thread(), "not a Java thread");
@@ -813,101 +840,45 @@
JavaThreadState state = thread->thread_state();
thread->frame_anchor()->make_walkable(thread);
+ uint64_t safepoint_id = SafepointSynchronize::safepoint_counter();
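+  // Sample the current safepoint id: it is published in our ThreadSafepointState
+  // (for the VMThread's stable-state check) and passed to the wait barrier below.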
// Check that we have a valid thread_state at this point
switch(state) {
case _thread_in_vm_trans:
case _thread_in_Java: // From compiled code
-
- // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
- // we pretend we are still in the VM.
- thread->set_thread_state(_thread_in_vm);
-
- if (is_synchronizing()) {
- Atomic::inc (&TryingToBlock) ;
- }
-
- // We will always be holding the Safepoint_lock when we are examine the state
- // of a thread. Hence, the instructions between the Safepoint_lock->lock() and
- // Safepoint_lock->unlock() are happening atomic with regards to the safepoint code
- Safepoint_lock->lock_without_safepoint_check();
- if (is_synchronizing()) {
- // Decrement the number of threads to wait for and signal vm thread
- assert(_waiting_to_block > 0, "sanity check");
- _waiting_to_block--;
- thread->safepoint_state()->set_has_called_back(true);
-
- DEBUG_ONLY(thread->set_visited_for_critical_count(true));
- if (thread->in_critical()) {
- // Notice that this thread is in a critical section
- increment_jni_active_count();
- }
-
- // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
- if (_waiting_to_block == 0) {
- Safepoint_lock->notify_all();
- }
- }
-
- if (block_in_safepoint_check) {
- // We transition the thread to state _thread_blocked here, but
- // we can't do our usual check for external suspension and then
- // self-suspend after the lock_without_safepoint_check() call
- // below because we are often called during transitions while
- // we hold different locks. That would leave us suspended while
- // holding a resource which results in deadlocks.
- thread->set_thread_state(_thread_blocked);
- Safepoint_lock->unlock();
-
- // We now try to acquire the threads lock. Since this lock is hold by the VM thread during
- // the entire safepoint, the threads will all line up here during the safepoint.
- Threads_lock->lock_without_safepoint_check();
- // restore original state. This is important if the thread comes from compiled code, so it
- // will continue to execute with the _thread_in_Java state.
- thread->set_thread_state(state);
- Threads_lock->unlock();
- } else {
- // We choose not to block in this call since we would be
- // caught when transitioning back anyways if the safepoint
- // is still going on.
- thread->set_thread_state(state);
- Safepoint_lock->unlock();
- }
- break;
-
case _thread_in_native_trans:
case _thread_blocked_trans:
case _thread_new_trans:
- if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back &&
- block_in_safepoint_check) {
- thread->print_thread_state();
- fatal("Deadlock in safepoint code. "
- "Should have called back to the VM before blocking.");
- }
+
+      // We have no idea where the VMThread is; it might even be at the next
+      // safepoint already. So we can miss this poll, but we will stop at the next one.
- // We transition the thread to state _thread_blocked here, but
- // we can't do our usual check for external suspension and then
- // self-suspend after the lock_without_safepoint_check() call
- // below because we are often called during transitions while
- // we hold different locks. That would leave us suspended while
- // holding a resource which results in deadlocks.
+      // This is a load-dependent store; it must not float above the load of safepoint_id.
+ thread->safepoint_state()->set_safepoint_id(safepoint_id); // Release store
+
+      // This part could be skipped if we notice that we missed this safepoint or are already in a future one.
+ OrderAccess::storestore();
thread->set_thread_state(_thread_blocked);
- // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans. Hence,
- // the safepoint code might still be waiting for it to block. We need to change the state here,
- // so it can see that it is at a safepoint.
+ OrderAccess::fence(); // Load in wait barrier should not float up
+ _wait_barrier->wait(static_cast<int>(safepoint_id));
+ assert(_state != _synchronized, "Can't be");
- // Block until the safepoint operation is completed.
- Threads_lock->lock_without_safepoint_check();
-
- // Restore state
+ // If barrier is disarmed stop store from floating above loads in barrier.
+ OrderAccess::loadstore();
thread->set_thread_state(state);
- Threads_lock->unlock();
+ // Then we reset the safepoint id to inactive.
+ thread->safepoint_state()->reset_safepoint_id(); // Release store
+
+ OrderAccess::fence();
+
break;
default:
fatal("Illegal threadstate encountered: %d", state);
}
+ guarantee(thread->safepoint_state()->get_safepoint_id() == InactiveSafepointCounter,
+ "The safepoint id should be set only in block path");
// Check for pending. async. exceptions or suspends - except if the
// thread was blocked inside the VM. has_special_runtime_exit_condition()
@@ -979,7 +950,7 @@
if (cur_thread->thread_state() != _thread_blocked &&
((reason == _spinning_timeout && cur_state->is_running()) ||
- (reason == _blocking_timeout && !cur_state->has_called_back()))) {
+ (reason == _blocking_timeout))) {
ls.print("# ");
cur_thread->print_on(&ls);
ls.cr();
@@ -1001,11 +972,10 @@
// -------------------------------------------------------------------------------------------------------
// Implementation of ThreadSafepointState
-ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
- _thread = thread;
- _type = _running;
- _has_called_back = false;
- _at_poll_safepoint = false;
+ThreadSafepointState::ThreadSafepointState(JavaThread *thread)
+ : _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false),
+ _safepoint_id(SafepointSynchronize::InactiveSafepointCounter),
+ _orig_thread_state(_thread_uninitialized), _next(NULL) {
}
void ThreadSafepointState::create(JavaThread *thread) {
@@ -1020,13 +990,30 @@
}
}
-void ThreadSafepointState::examine_state_of_thread() {
+uint64_t ThreadSafepointState::get_safepoint_id() const {
+ return OrderAccess::load_acquire(&_safepoint_id);
+}
+
+void ThreadSafepointState::reset_safepoint_id() {
+ OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
+}
+
+void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) {
+ OrderAccess::release_store(&_safepoint_id, safepoint_id);
+}
+
+void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {
assert(is_running(), "better be running or just have hit safepoint poll");
- JavaThreadState state = _thread->thread_state();
+ JavaThreadState stable_state;
+ if (!SafepointSynchronize::try_stable_load_state(&stable_state, _thread, safepoint_count)) {
+ // We could not get stable state of the JavaThread.
+ // Consider it running and just return.
+ return;
+ }
// Save the state at the start of safepoint processing.
- _orig_thread_state = state;
+ _orig_thread_state = stable_state;
// Check for a thread that is suspended. Note that thread resume tries
// to grab the Threads_lock which we own here, so a thread cannot be
@@ -1050,21 +1037,13 @@
//
bool is_suspended = _thread->is_ext_suspended();
if (is_suspended) {
- roll_forward(_at_safepoint);
+ account_safe_thread();
return;
}
- // Some JavaThread states have an initial safepoint state of
- // running, but are actually at a safepoint. We will happily
- // agree and update the safepoint state here.
- if (SafepointSynchronize::safepoint_safe(_thread, state)) {
- SafepointSynchronize::check_for_lazy_critical_native(_thread, state);
- roll_forward(_at_safepoint);
- return;
- }
-
- if (state == _thread_in_vm) {
- roll_forward(_call_back);
+ if (safepoint_safe_with(_thread, stable_state)) {
+ check_for_lazy_critical_native(_thread, stable_state);
+ account_safe_thread();
return;
}
@@ -1077,63 +1056,28 @@
return;
}
-// Returns true is thread could not be rolled forward at present position.
-void ThreadSafepointState::roll_forward(suspend_type type) {
- _type = type;
-
- switch(_type) {
- case _at_safepoint:
- SafepointSynchronize::signal_thread_at_safepoint();
- DEBUG_ONLY(_thread->set_visited_for_critical_count(true));
- if (_thread->in_critical()) {
- // Notice that this thread is in a critical section
- SafepointSynchronize::increment_jni_active_count();
- }
- break;
-
- case _call_back:
- set_has_called_back(false);
- break;
-
- case _running:
- default:
- ShouldNotReachHere();
+void ThreadSafepointState::account_safe_thread() {
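+  // The VMThread has determined this thread is safepoint safe: take it out of
+  // the waiting count and account for any JNI critical region it holds.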
+ SafepointSynchronize::decrement_waiting_to_block();
+ if (_thread->in_critical()) {
+ // Notice that this thread is in a critical section
+ SafepointSynchronize::increment_jni_active_count();
}
+ DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());)
+ assert(!_safepoint_safe, "Must be unsafe before safe");
+ _safepoint_safe = true;
}
void ThreadSafepointState::restart() {
- switch(type()) {
- case _at_safepoint:
- case _call_back:
- break;
-
- case _running:
- default:
- tty->print_cr("restart thread " INTPTR_FORMAT " with state %d",
- p2i(_thread), _type);
- _thread->print();
- ShouldNotReachHere();
- }
- _type = _running;
- set_has_called_back(false);
+ assert(_safepoint_safe, "Must be safe before unsafe");
+ _safepoint_safe = false;
}
-
void ThreadSafepointState::print_on(outputStream *st) const {
- const char *s = NULL;
-
- switch(_type) {
- case _running : s = "_running"; break;
- case _at_safepoint : s = "_at_safepoint"; break;
- case _call_back : s = "_call_back"; break;
- default:
- ShouldNotReachHere();
- }
+ const char *s = _safepoint_safe ? "_at_safepoint" : "_running";
st->print_cr("Thread: " INTPTR_FORMAT
- " [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
- p2i(_thread), _thread->osthread()->thread_id(), s, _has_called_back,
- _at_poll_safepoint);
+ " [0x%2x] State: %s _at_poll_safepoint %d",
+ p2i(_thread), _thread->osthread()->thread_id(), s, _at_poll_safepoint);
_thread->print_thread_state_on(st);
}
@@ -1143,11 +1087,10 @@
// Block the thread at poll or poll return for safepoint/handshake.
void ThreadSafepointState::handle_polling_page_exception() {
- // Check state. block() will set thread state to thread_in_vm which will
- // cause the safepoint state _type to become _call_back.
- suspend_type t = type();
- assert(!SafepointMechanism::uses_global_page_poll() || t == ThreadSafepointState::_running,
- "polling page exception on thread not running state: %u", uint(t));
+ // If we're using a global poll, then the thread should not be
+ // marked as safepoint safe yet.
+ assert(!SafepointMechanism::uses_global_page_poll() || !_safepoint_safe,
+ "polling page exception on thread safepoint safe");
// Step 1: Find the nmethod from the return address
address real_return_addr = thread()->saved_exception_pc();
--- a/src/hotspot/share/runtime/safepoint.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/safepoint.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -26,15 +26,15 @@
#define SHARE_RUNTIME_SAFEPOINT_HPP
#include "memory/allocation.hpp"
-#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "runtime/thread.hpp"
#include "utilities/ostream.hpp"
+#include "utilities/waitBarrier.hpp"
//
// Safepoint synchronization
////
-// The VMThread or CMS_thread uses the SafepointSynchronize::begin/end
+// The VMThread uses the SafepointSynchronize::begin/end
// methods to enter/exit a safepoint region. The begin method will roll
// all JavaThreads forward to a safepoint.
//
@@ -45,9 +45,7 @@
 // exit safepoint methods, when a thread is blocked/restarted. Hence, all mutex enter/
// exit points *must* be at a safepoint.
-
class ThreadSafepointState;
-class JavaThread;
//
// Implements roll-forward to safepoint (safepoint synchronization)
@@ -55,21 +53,10 @@
class SafepointSynchronize : AllStatic {
public:
enum SynchronizeState {
- _not_synchronized = 0, // Threads not synchronized at a safepoint
- // Keep this value 0. See the comment in do_call_back()
+ _not_synchronized = 0, // Threads not synchronized at a safepoint. Keep this value 0.
_synchronizing = 1, // Synchronizing in progress
- _synchronized = 2 // All Java threads are stopped at a safepoint. Only VM thread is running
- };
-
- enum SafepointingThread {
- _null_thread = 0,
- _vm_thread = 1,
- _other_thread = 2
- };
-
- enum SafepointTimeoutReason {
- _spinning_timeout = 0,
- _blocking_timeout = 1
+ _synchronized = 2 // All Java threads are running in native, blocked in OS or stopped at safepoint.
+ // VM thread and any NonJavaThread may be running.
};
// The enums are listed in the order of the tasks when done serially.
@@ -86,22 +73,33 @@
};
private:
- static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock
- static volatile int _waiting_to_block; // number of threads we are waiting for to block
- static int _current_jni_active_count; // Counts the number of active critical natives during the safepoint
- static int _defer_thr_suspend_loop_count; // Iterations before blocking VM threads
+ friend class SafepointMechanism;
+ friend class ThreadSafepointState;
+ friend class HandshakeState;
+
+ enum SafepointTimeoutReason {
+ _spinning_timeout = 0,
+ _blocking_timeout = 1
+ };
+
+ // Threads might read this flag directly, without acquiring the Threads_lock:
+ static volatile SynchronizeState _state;
+ // Number of threads we are waiting for to block:
+ static int _waiting_to_block;
+ // Counts the number of active critical natives during the safepoint:
+ static int _current_jni_active_count;
// This counter is used for fast versions of jni_Get<Primitive>Field.
- // An even value means there is no ongoing safepoint operations.
+ // An even value means there are no ongoing safepoint operations.
// The counter is incremented ONLY at the beginning and end of each
- // safepoint. The fact that Threads_lock is held throughout each pair of
- // increments (at the beginning and end of each safepoint) guarantees
- // race freedom.
+ // safepoint.
static volatile uint64_t _safepoint_counter;
-private:
- static long _end_of_last_safepoint; // Time of last safepoint in milliseconds
- static julong _coalesced_vmop_count; // coalesced vmop count
+ // JavaThreads that need to block for the safepoint will stop on the
+ // _wait_barrier, where they can quickly be started again.
+ static WaitBarrier* _wait_barrier;
+ static long _end_of_last_safepoint; // Time of last safepoint in milliseconds
+ static julong _coalesced_vmop_count; // coalesced vmop count
// Statistics
static void begin_statistics(int nof_threads, int nof_running);
@@ -114,42 +112,41 @@
// For debug long safepoint
static void print_safepoint_timeout(SafepointTimeoutReason timeout_reason);
+ // Helper methods for safepoint procedure:
+ static void arm_safepoint();
+ static int synchronize_threads(jlong safepoint_limit_time, int nof_threads, int* initial_running);
+ static void disarm_safepoint();
+ static void increment_jni_active_count();
+ static void decrement_waiting_to_block();
+
+ // Used in safepoint_safe to do a stable load of the thread state.
+ static bool try_stable_load_state(JavaThreadState *state,
+ JavaThread *thread,
+ uint64_t safepoint_count);
+
+ // Called when a thread voluntarily blocks
+ static void block(JavaThread *thread);
+
+ // Called from VMThread during handshakes.
+ // If true the VMThread may safely process the handshake operation for the JavaThread.
+ static bool handshake_safe(JavaThread *thread);
+
public:
- // Main entry points
+ static void init(Thread* vmthread);
- // Roll all threads forward to safepoint. Must be called by the
- // VMThread or CMS_thread.
+ // Roll all threads forward to safepoint. Must be called by the VMThread.
static void begin();
static void end(); // Start all suspended threads again...
- static bool safepoint_safe(JavaThread *thread, JavaThreadState state);
-
- static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state);
+  // The value used for a safepoint id that has not been set.
+ static const uint64_t InactiveSafepointCounter;
// Query
- inline static bool is_at_safepoint() { return _state == _synchronized; }
- inline static bool is_synchronizing() { return _state == _synchronizing; }
- inline static uint64_t safepoint_counter() { return _safepoint_counter; }
-
- inline static void increment_jni_active_count() {
- assert_locked_or_safepoint(Safepoint_lock);
- _current_jni_active_count++;
- }
-
-private:
- inline static bool do_call_back() {
- return (_state != _not_synchronized);
- }
-
- // Called when a thread voluntarily blocks
- static void block(JavaThread *thread, bool block_in_safepoint_check = true);
-
- friend class SafepointMechanism;
-
-public:
- static void signal_thread_at_safepoint() { _waiting_to_block--; }
-
+ static bool is_at_safepoint() { return _state == _synchronized; }
+ static bool is_synchronizing() { return _state == _synchronizing; }
+ static uint64_t safepoint_counter() { return _safepoint_counter; }
+ static bool is_same_safepoint(uint64_t counter) { return (SafepointSynchronize::safepoint_counter() - counter) < 2; }
// Exception handling for page polling
static void handle_polling_page_exception(JavaThread *thread);
@@ -164,13 +161,13 @@
static void do_cleanup_tasks();
static void print_stat_on_exit();
- inline static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; }
+ static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; }
- static void set_is_at_safepoint() { _state = _synchronized; }
- static void set_is_not_at_safepoint() { _state = _not_synchronized; }
+ static void set_is_at_safepoint() { _state = _synchronized; }
+ static void set_is_not_at_safepoint() { _state = _not_synchronized; }
// Assembly support
- static address address_of_state() { return (address)&_state; }
+ static address address_of_state() { return (address)&_state; }
// Only used for making sure that no safepoint has happened in
// JNI_FastGetField. Therefore only the low 32-bits are needed
@@ -201,44 +198,43 @@
// State class for a thread suspended at a safepoint
class ThreadSafepointState: public CHeapObj<mtInternal> {
- public:
- // These states are maintained by VM thread while threads are being brought
- // to a safepoint. After SafepointSynchronize::end(), they are reset to
- // _running.
- enum suspend_type {
- _running = 0, // Thread state not yet determined (i.e., not at a safepoint yet)
- _at_safepoint = 1, // Thread at a safepoint (f.ex., when blocked on a lock)
- _call_back = 2 // Keep executing and wait for callback (if thread is in interpreted or vm)
- };
private:
- volatile bool _at_poll_safepoint; // At polling page safepoint (NOT a poll return safepoint)
- // Thread has called back the safepoint code (for debugging)
- bool _has_called_back;
+ // At polling page safepoint (NOT a poll return safepoint):
+ volatile bool _at_poll_safepoint;
+ JavaThread* _thread;
+ bool _safepoint_safe;
+ volatile uint64_t _safepoint_id;
+ JavaThreadState _orig_thread_state;
- JavaThread * _thread;
- volatile suspend_type _type;
- JavaThreadState _orig_thread_state;
+ ThreadSafepointState* _next;
+ void account_safe_thread();
public:
ThreadSafepointState(JavaThread *thread);
- // examine/roll-forward/restart
- void examine_state_of_thread();
- void roll_forward(suspend_type type);
+ // Linked list support:
+ ThreadSafepointState* get_next() const { return _next; }
+ void set_next(ThreadSafepointState* value) { _next = value; }
+ ThreadSafepointState** next_ptr() { return &_next; }
+
+ // examine/restart
+ void examine_state_of_thread(uint64_t safepoint_count);
void restart();
// Query
JavaThread* thread() const { return _thread; }
- suspend_type type() const { return _type; }
- bool is_running() const { return (_type==_running); }
+ bool is_running() const { return !_safepoint_safe; }
+
+ uint64_t get_safepoint_id() const;
+ void reset_safepoint_id();
+ void set_safepoint_id(uint64_t sid);
+
JavaThreadState orig_thread_state() const { return _orig_thread_state; }
// Support for safepoint timeout (debugging)
- bool has_called_back() const { return _has_called_back; }
- void set_has_called_back(bool val) { _has_called_back = val; }
- bool is_at_poll_safepoint() { return _at_poll_safepoint; }
- void set_at_poll_safepoint(bool val) { _at_poll_safepoint = val; }
+ bool is_at_poll_safepoint() { return _at_poll_safepoint; }
+ void set_at_poll_safepoint(bool val) { _at_poll_safepoint = val; }
void handle_polling_page_exception();
--- a/src/hotspot/share/runtime/safepointMechanism.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -86,6 +86,9 @@
void SafepointMechanism::block_if_requested_slow(JavaThread *thread) {
// local poll already checked, if used.
if (global_poll()) {
+ // Any load in ::block must not pass the global poll load.
+ // Otherwise we might load an old safepoint counter (for example).
+ OrderAccess::loadload();
SafepointSynchronize::block(thread);
}
if (uses_thread_local_poll() && thread->has_handshake()) {
--- a/src/hotspot/share/runtime/safepointMechanism.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/safepointMechanism.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -78,9 +78,6 @@
// Blocks a thread until safepoint/handshake is completed.
static inline void block_if_requested(JavaThread* thread);
- // Calls back if there is a pending safepoint but does not block for it.
- static inline void callback_if_safepoint(JavaThread* thread);
-
// Caller is responsible for using a memory barrier if needed.
static inline void arm_local_poll(JavaThread* thread);
static inline void disarm_local_poll(JavaThread* thread);
--- a/src/hotspot/share/runtime/safepointMechanism.inline.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -35,7 +35,7 @@
}
bool SafepointMechanism::global_poll() {
- return SafepointSynchronize::do_call_back();
+ return (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized);
}
bool SafepointMechanism::local_poll(Thread* thread) {
@@ -62,20 +62,6 @@
block_if_requested_slow(thread);
}
-void SafepointMechanism::callback_if_safepoint(JavaThread* thread) {
- if (!uses_thread_local_poll() || local_poll_armed(thread)) {
- // If using thread local polls, we should not check the
- // global_poll() and callback via block() if the VMThread
- // has not yet armed the local poll. Otherwise, when used in
- // combination with should_block(), the latter could miss
- // detecting the same safepoint that this method would detect
- // if only checking global polls.
- if (global_poll()) {
- SafepointSynchronize::block(thread, false);
- }
- }
-}
-
void SafepointMechanism::arm_local_poll(JavaThread* thread) {
thread->set_polling_page(poll_armed_value());
}
--- a/src/hotspot/share/runtime/task.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/task.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -31,48 +31,20 @@
int PeriodicTask::_num_tasks = 0;
PeriodicTask* PeriodicTask::_tasks[PeriodicTask::max_tasks];
-#ifndef PRODUCT
-elapsedTimer PeriodicTask::_timer;
-int PeriodicTask::_intervalHistogram[PeriodicTask::max_interval];
-int PeriodicTask::_ticks;
-
-void PeriodicTask::print_intervals() {
- if (ProfilerCheckIntervals) {
- for (int i = 0; i < PeriodicTask::max_interval; i++) {
- int n = _intervalHistogram[i];
- if (n > 0) tty->print_cr("%3d: %5d (%4.1f%%)", i, n, 100.0 * n / _ticks);
- }
- }
-}
-#endif
void PeriodicTask::real_time_tick(int delay_time) {
assert(Thread::current()->is_Watcher_thread(), "must be WatcherThread");
-#ifndef PRODUCT
- if (ProfilerCheckIntervals) {
- _ticks++;
- _timer.stop();
- int ms = (int)_timer.milliseconds();
- _timer.reset();
- _timer.start();
- if (ms >= PeriodicTask::max_interval) ms = PeriodicTask::max_interval - 1;
- _intervalHistogram[ms]++;
- }
-#endif
+ // The WatcherThread does not participate in the safepoint protocol
+ // for the PeriodicTask_lock because it is not a JavaThread.
+ MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+ int orig_num_tasks = _num_tasks;
- {
- // The WatcherThread does not participate in the safepoint protocol
- // for the PeriodicTask_lock because it is not a JavaThread.
- MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
- int orig_num_tasks = _num_tasks;
-
- for(int index = 0; index < _num_tasks; index++) {
- _tasks[index]->execute_if_pending(delay_time);
- if (_num_tasks < orig_num_tasks) { // task dis-enrolled itself
- index--; // re-do current slot as it has changed
- orig_num_tasks = _num_tasks;
- }
+ for(int index = 0; index < _num_tasks; index++) {
+ _tasks[index]->execute_if_pending(delay_time);
+ if (_num_tasks < orig_num_tasks) { // task dis-enrolled itself
+ index--; // re-do current slot as it has changed
+ orig_num_tasks = _num_tasks;
}
}
}
--- a/src/hotspot/share/runtime/task.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/task.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -58,13 +58,6 @@
// Can only be called by the WatcherThread
static void real_time_tick(int delay_time);
-#ifndef PRODUCT
- static elapsedTimer _timer; // measures time between ticks
- static int _ticks; // total number of ticks
- static int _intervalHistogram[max_interval]; // to check spacing of timer interrupts
- public:
- static void print_intervals();
-#endif
// Only the WatcherThread can cause us to execute PeriodicTasks
friend class WatcherThread;
public:
--- a/src/hotspot/share/runtime/thread.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/thread.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -421,11 +421,21 @@
#ifdef ASSERT
private:
- bool _visited_for_critical_count;
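+  // Debug-only bookkeeping: holds the id of the safepoint for which this
+  // thread's JNI critical-region count has been accounted; 0 means not visited.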
+ volatile uint64_t _visited_for_critical_count;
public:
- void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
- bool was_visited_for_critical_count() const { return _visited_for_critical_count; }
+ void set_visited_for_critical_count(uint64_t safepoint_id) {
+ assert(_visited_for_critical_count == 0, "Must be reset before set");
+ assert((safepoint_id & 0x1) == 1, "Must be odd");
+ _visited_for_critical_count = safepoint_id;
+ }
+ void reset_visited_for_critical_count(uint64_t safepoint_id) {
+ assert(_visited_for_critical_count == safepoint_id, "Was not visited");
+ _visited_for_critical_count = 0;
+ }
+ bool was_visited_for_critical_count(uint64_t safepoint_id) const {
+ return _visited_for_critical_count == safepoint_id;
+ }
#endif
public:
--- a/src/hotspot/share/runtime/vmThread.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/runtime/vmThread.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -458,6 +458,8 @@
void VMThread::loop() {
assert(_cur_vm_operation == NULL, "no current one should be executing");
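+  // Set up the safepoint machinery (presumably including the wait barrier) now
+  // that the VMThread exists; only the VMThread may begin a safepoint.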
+ SafepointSynchronize::init(_vm_thread);
+
while(true) {
VM_Operation* safepoint_ops = NULL;
//
--- a/src/hotspot/share/services/runtimeService.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/services/runtimeService.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "logging/log.hpp"
+#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/management.hpp"
@@ -40,7 +41,9 @@
PerfCounter* RuntimeService::_total_safepoints = NULL;
PerfCounter* RuntimeService::_safepoint_time_ticks = NULL;
PerfCounter* RuntimeService::_application_time_ticks = NULL;
-double RuntimeService::_last_safepoint_sync_time_sec = 0.0;
+jlong RuntimeService::_last_safepoint_sync_time_ns = 0;
+jlong RuntimeService::_last_safepoint_end_time_ns = 0;
+jlong RuntimeService::_last_app_time_ns = 0;
void RuntimeService::init() {
@@ -89,12 +92,14 @@
// Print the time interval in which the app was executing
if (_app_timer.is_updated()) {
- log_info(safepoint)("Application time: %3.7f seconds", last_application_time_sec());
+ _last_app_time_ns = _app_timer.ticks_since_update();
+ log_info(safepoint)("Application time: %3.7f seconds", TimeHelper::counter_to_seconds(_last_app_time_ns));
}
// update the time stamp to begin recording safepoint time
+ _last_safepoint_sync_time_ns = 0;
+ _last_safepoint_end_time_ns = 0;
_safepoint_timer.update();
- _last_safepoint_sync_time_sec = 0.0;
if (UsePerfData) {
_total_safepoints->inc();
if (_app_timer.is_updated()) {
@@ -107,18 +112,24 @@
if (UsePerfData) {
_sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
}
- if (log_is_enabled(Info, safepoint)) {
- _last_safepoint_sync_time_sec = last_safepoint_time_sec();
+ if (log_is_enabled(Info, safepoint) || log_is_enabled(Info, safepoint, stats)) {
+ _last_safepoint_sync_time_ns = _safepoint_timer.ticks_since_update();
}
}
void RuntimeService::record_safepoint_end() {
HS_PRIVATE_SAFEPOINT_END();
- // Print the time interval for which the app was stopped
- // during the current safepoint operation.
- log_info(safepoint)("Total time for which application threads were stopped: %3.7f seconds, Stopping threads took: %3.7f seconds",
- last_safepoint_time_sec(), _last_safepoint_sync_time_sec);
+  // Logging at safepoint+stats=info needs _last_safepoint_end_time_ns to be set.
+  // Logging at safepoint=info needs _last_safepoint_end_time_ns for the log statement below.
+ if (log_is_enabled(Info, safepoint) || log_is_enabled(Info, safepoint, stats)) {
+ _last_safepoint_end_time_ns = _safepoint_timer.ticks_since_update();
+ log_info(safepoint)(
+ "Total time for which application threads were stopped: %3.7f seconds, "
+ "Stopping threads took: %3.7f seconds",
+ TimeHelper::counter_to_seconds(_last_safepoint_end_time_ns),
+ TimeHelper::counter_to_seconds(_last_safepoint_sync_time_ns));
+ }
// update the time stamp to begin recording app time
_app_timer.update();
@@ -127,6 +138,25 @@
}
}
+void RuntimeService::record_safepoint_epilog(const char* operation_name) {
+ if (!log_is_enabled(Info, safepoint, stats)) {
+ return;
+ }
+
+ log_info(safepoint, stats)(
+ "Safepoint \"%s\", "
+ "Time since last: " JLONG_FORMAT " ns; "
+ "Reaching safepoint: " JLONG_FORMAT " ns; "
+ "At safepoint: " JLONG_FORMAT " ns; "
+ "Total: " JLONG_FORMAT " ns",
+ operation_name,
+ _last_app_time_ns,
+ _last_safepoint_sync_time_ns,
+ _last_safepoint_end_time_ns - _last_safepoint_sync_time_ns,
+ _last_safepoint_end_time_ns
+ );
+}
+
void RuntimeService::record_application_start() {
// update the time stamp to begin recording app time
_app_timer.update();
--- a/src/hotspot/share/services/runtimeService.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/services/runtimeService.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -37,7 +37,9 @@
static TimeStamp _safepoint_timer;
static TimeStamp _app_timer;
- static double _last_safepoint_sync_time_sec;
+ static jlong _last_safepoint_sync_time_ns;
+ static jlong _last_safepoint_end_time_ns;
+ static jlong _last_app_time_ns;
public:
static void init();
@@ -47,13 +49,11 @@
static jlong safepoint_time_ms();
static jlong application_time_ms();
- static double last_safepoint_time_sec() { return _safepoint_timer.seconds(); }
- static double last_application_time_sec() { return _app_timer.seconds(); }
-
// callbacks
static void record_safepoint_begin() NOT_MANAGEMENT_RETURN;
static void record_safepoint_synchronized() NOT_MANAGEMENT_RETURN;
static void record_safepoint_end() NOT_MANAGEMENT_RETURN;
+ static void record_safepoint_epilog(const char* operation_name) NOT_MANAGEMENT_RETURN;
static void record_application_start() NOT_MANAGEMENT_RETURN;
};
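
The new record_safepoint_epilog() prints one safepoint+stats=info line per safepoint operation, built from the nanosecond counters now cached in _last_app_time_ns, _last_safepoint_sync_time_ns and _last_safepoint_end_time_ns. A minimal way to observe it is to run any small program with unified logging enabled for those tags; the flag spelling below is an assumption derived from the log_info(safepoint, stats) call above, and SafepointLogDemo is just an illustrative name.

    // SafepointLogDemo.java
    // Run with:  java -Xlog:safepoint+stats=info SafepointLogDemo
    public class SafepointLogDemo {
        public static void main(String[] args) throws InterruptedException {
            for (int i = 0; i < 5; i++) {
                System.gc();        // each GC request goes through at least one safepoint operation
                Thread.sleep(100);  // lets some application time accumulate between safepoints
            }
        }
    }

Each safepoint should then produce a line of the form shown in record_safepoint_epilog(): Safepoint "<operation>", Time since last: ... ns; Reaching safepoint: ... ns; At safepoint: ... ns; Total: ... ns.
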
--- a/src/hotspot/share/utilities/constantTag.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/constantTag.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -100,6 +100,12 @@
bool is_dynamic_constant() const { return _tag == JVM_CONSTANT_Dynamic; }
bool is_invoke_dynamic() const { return _tag == JVM_CONSTANT_InvokeDynamic; }
+ bool has_bootstrap() const {
+ return (_tag == JVM_CONSTANT_Dynamic ||
+ _tag == JVM_CONSTANT_DynamicInError ||
+ _tag == JVM_CONSTANT_InvokeDynamic);
+ }
+
bool is_loadable_constant() const {
return ((_tag >= JVM_CONSTANT_Integer && _tag <= JVM_CONSTANT_String) ||
is_method_type() || is_method_handle() || is_dynamic_constant() ||
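
has_bootstrap() is true exactly for the three constant-pool tags that carry a bootstrap-method reference (JVM_CONSTANT_Dynamic, JVM_CONSTANT_DynamicInError, JVM_CONSTANT_InvokeDynamic). From plain Java source the easiest way to produce such an entry is a lambda, which javac compiles to an invokedynamic instruction; the class below is only meant for inspecting the resulting class file, not HotSpot itself.

    // HasBootstrapDemo.java
    // Compile, then run `javap -v HasBootstrapDemo` and look for the
    // CONSTANT_InvokeDynamic entry plus the BootstrapMethods attribute it points at.
    public class HasBootstrapDemo {
        public static void main(String[] args) {
            Runnable r = () -> System.out.println("hello");  // compiled via invokedynamic
            r.run();
        }
    }
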
--- a/src/hotspot/share/utilities/debug.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/debug.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -61,6 +61,7 @@
#include "utilities/vmError.hpp"
#include <stdio.h>
+#include <stdarg.h>
// Support for showing register content on asserts/guarantees.
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
@@ -244,6 +245,22 @@
context = g_assertion_context;
}
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
+#ifdef ASSERT
+ if (detail_fmt != NULL && ExecutingUnitTests) {
+ // Special handling for the sake of gtest death tests which expect the assert
+ // message to be printed in one short line to stderr (see TEST_VM_ASSERT_MSG) and
+ // cannot be tweaked to accept our normal assert message.
+ va_list detail_args_copy;
+ va_copy(detail_args_copy, detail_args);
+ ::fputs("assert failed: ", stderr);
+ ::vfprintf(stderr, detail_fmt, detail_args_copy);
+ ::fputs("\n", stderr);
+ ::fflush(stderr);
+ va_end(detail_args_copy);
+ }
+#endif
+
VMError::report_and_die(Thread::current_or_null(), context, file, line, error_msg, detail_fmt, detail_args);
va_end(detail_args);
}
@@ -293,21 +310,6 @@
report_vm_error(file, line, "Unimplemented()");
}
-#ifdef ASSERT
-bool is_executing_unit_tests() {
- return ExecutingUnitTests;
-}
-
-void report_assert_msg(const char* msg, ...) {
- va_list ap;
- va_start(ap, msg);
-
- fprintf(stderr, "assert failed: %s\n", err_msg(FormatBufferDummy(), msg, ap).buffer());
-
- va_end(ap);
-}
-#endif // ASSERT
-
void report_untested(const char* file, int line, const char* message) {
#ifndef PRODUCT
warning("Untested: %s in %s: %d\n", message, file, line);
--- a/src/hotspot/share/utilities/debug.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/debug.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -54,9 +54,6 @@
do { \
if (!(p)) { \
TOUCH_ASSERT_POISON; \
- if (is_executing_unit_tests()) { \
- report_assert_msg(__VA_ARGS__); \
- } \
report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", __VA_ARGS__); \
BREAKPOINT; \
} \
@@ -157,16 +154,10 @@
// ATTRIBUTE_PRINTF works with gcc >= 4.8 and any other compiler.
void report_vm_error(const char* file, int line, const char* error_msg,
const char* detail_fmt, ...) ATTRIBUTE_PRINTF(4, 5);
-#ifdef ASSERT
-void report_assert_msg(const char* msg, ...) ATTRIBUTE_PRINTF(1, 2);
-#endif // ASSERT
#else
// GCC < 4.8 warns because of empty format string. Warning can not be switched off selectively.
void report_vm_error(const char* file, int line, const char* error_msg,
const char* detail_fmt, ...);
-#ifdef ASSERT
-void report_assert_msg(const char* msg, ...);
-#endif // ASSERT
#endif
void report_vm_status_error(const char* file, int line, const char* error_msg,
int status, const char* detail);
@@ -178,11 +169,6 @@
void report_unimplemented(const char* file, int line);
void report_untested(const char* file, int line, const char* message);
-#ifdef ASSERT
-// unit test support
-bool is_executing_unit_tests();
-#endif // ASSERT
-
void warning(const char* format, ...) ATTRIBUTE_PRINTF(1, 2);
// Compile-time asserts. Cond must be a compile-time constant expression that
--- a/src/hotspot/share/utilities/events.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/events.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
EventLog* Events::_logs = NULL;
StringEventLog* Events::_messages = NULL;
-StringEventLog* Events::_exceptions = NULL;
+ExtendedStringEventLog* Events::_exceptions = NULL;
StringEventLog* Events::_redefinitions = NULL;
UnloadingEventLog* Events::_class_unloading = NULL;
StringEventLog* Events::_deopt_messages = NULL;
@@ -67,7 +67,7 @@
void Events::init() {
if (LogEvents) {
_messages = new StringEventLog("Events");
- _exceptions = new StringEventLog("Internal exceptions");
+ _exceptions = new ExtendedStringEventLog("Internal exceptions");
_redefinitions = new StringEventLog("Classes redefined");
_class_unloading = new UnloadingEventLog("Classes unloaded");
_deopt_messages = new StringEventLog("Deoptimization events");
--- a/src/hotspot/share/utilities/events.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/events.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -135,37 +135,43 @@
};
// A simple wrapper class for fixed size text messages.
-class StringLogMessage : public FormatBuffer<256> {
+template <size_t bufsz>
+class FormatStringLogMessage : public FormatBuffer<bufsz> {
public:
// Wrap this buffer in a stringStream.
stringStream stream() {
- return stringStream(_buf, size());
+ return stringStream(this->_buf, this->size());
}
};
+typedef FormatStringLogMessage<256> StringLogMessage;
+typedef FormatStringLogMessage<512> ExtendedStringLogMessage;
// A simple ring buffer of fixed size text messages.
-class StringEventLog : public EventLogBase<StringLogMessage> {
+template <size_t bufsz>
+class FormatStringEventLog : public EventLogBase< FormatStringLogMessage<bufsz> > {
public:
- StringEventLog(const char* name, int count = LogEventsBufferEntries) : EventLogBase<StringLogMessage>(name, count) {}
+ FormatStringEventLog(const char* name, int count = LogEventsBufferEntries) : EventLogBase< FormatStringLogMessage<bufsz> >(name, count) {}
void logv(Thread* thread, const char* format, va_list ap) ATTRIBUTE_PRINTF(3, 0) {
- if (!should_log()) return;
+ if (!this->should_log()) return;
- double timestamp = fetch_timestamp();
- MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
- int index = compute_log_index();
- _records[index].thread = thread;
- _records[index].timestamp = timestamp;
- _records[index].data.printv(format, ap);
+ double timestamp = this->fetch_timestamp();
+ MutexLockerEx ml(&this->_mutex, Mutex::_no_safepoint_check_flag);
+ int index = this->compute_log_index();
+ this->_records[index].thread = thread;
+ this->_records[index].timestamp = timestamp;
+ this->_records[index].data.printv(format, ap);
}
void log(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(3, 4) {
va_list ap;
va_start(ap, format);
- logv(thread, format, ap);
+ this->logv(thread, format, ap);
va_end(ap);
}
};
+typedef FormatStringEventLog<256> StringEventLog;
+typedef FormatStringEventLog<512> ExtendedStringEventLog;
class InstanceKlass;
@@ -189,7 +195,7 @@
// A log for internal exception related messages, like internal
// throws and implicit exceptions.
- static StringEventLog* _exceptions;
+ static ExtendedStringEventLog* _exceptions;
// Deoptization related messages
static StringEventLog* _deopt_messages;
@@ -307,6 +313,13 @@
out->cr();
}
+// Implement a printing routine for the ExtendedStringLogMessage
+template <>
+inline void EventLogBase<ExtendedStringLogMessage>::print(outputStream* out, ExtendedStringLogMessage& lm) {
+ out->print_raw(lm);
+ out->cr();
+}
+
// Place markers for the beginning and end up of a set of events.
// These end up in the default log.
class EventMark : public StackObj {
--- a/src/hotspot/share/utilities/exceptions.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/exceptions.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -411,7 +411,7 @@
// Pass through an Error, including BootstrapMethodError, any other form
// of linkage error, or say ThreadDeath/OutOfMemoryError
if (TraceMethodHandles) {
- tty->print_cr("[constant/invoke]dynamic passes through an Error for " INTPTR_FORMAT, p2i((void *)exception));
+ tty->print_cr("bootstrap method invocation wraps BSME around " INTPTR_FORMAT, p2i((void *)exception));
exception->print();
}
return;
--- a/src/hotspot/share/utilities/exceptions.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/src/hotspot/share/utilities/exceptions.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -237,7 +237,11 @@
// visible within the scope containing the THROW. Usually this is achieved by declaring the function
// with a TRAPS argument.
+#ifdef THIS_FILE
+#define THREAD_AND_LOCATION THREAD, THIS_FILE, __LINE__
+#else
#define THREAD_AND_LOCATION THREAD, __FILE__, __LINE__
+#endif
#define THROW_OOP(e) \
{ Exceptions::_throw_oop(THREAD_AND_LOCATION, e); return; }
--- a/src/java.base/share/classes/java/util/concurrent/CyclicBarrier.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.base/share/classes/java/util/concurrent/CyclicBarrier.java Sun Feb 17 09:54:08 2019 -0500
@@ -98,12 +98,11 @@
* }
* }}</pre>
*
- * Here, each worker thread processes a row of the matrix then waits at the
- * barrier until all rows have been processed. When all rows are processed
- * the supplied {@link Runnable} barrier action is executed and merges the
- * rows. If the merger
- * determines that a solution has been found then {@code done()} will return
- * {@code true} and each worker will terminate.
+ * Here, each worker thread processes a row of the matrix, then waits at the
+ * barrier until all rows have been processed. When all rows are processed the
+ * supplied {@link Runnable} barrier action is executed and merges the rows.
+ * If the merger determines that a solution has been found then {@code done()}
+ * will return {@code true} and each worker will terminate.
*
* <p>If the barrier action does not rely on the parties being suspended when
* it is executed, then any of the threads in the party could execute that
@@ -132,6 +131,7 @@
* corresponding {@code await()} in other threads.
*
* @see CountDownLatch
+ * @see Phaser
*
* @author Doug Lea
* @since 1.5
@@ -214,18 +214,17 @@
int index = --count;
if (index == 0) { // tripped
- boolean ranAction = false;
- try {
- final Runnable command = barrierCommand;
- if (command != null)
+ Runnable command = barrierCommand;
+ if (command != null) {
+ try {
command.run();
- ranAction = true;
- nextGeneration();
- return 0;
- } finally {
- if (!ranAction)
+ } catch (Throwable ex) {
breakBarrier();
+ throw ex;
+ }
}
+ nextGeneration();
+ return 0;
}
// loop until tripped, broken, interrupted, or timed out
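
The rewritten trip path above drops the ranAction flag: if the barrier action throws, breakBarrier() runs and the Throwable is rethrown to the thread that tripped the barrier, while every other waiter sees a BrokenBarrierException, the same observable behavior as before. A small demo of that failure path (class and message names are illustrative):

    import java.util.concurrent.BrokenBarrierException;
    import java.util.concurrent.CyclicBarrier;

    public class BarrierActionFailureDemo {
        public static void main(String[] args) throws Exception {
            CyclicBarrier barrier = new CyclicBarrier(2, () -> {
                throw new IllegalStateException("barrier action failed");
            });
            Thread waiter = new Thread(() -> {
                try {
                    barrier.await();                  // arrives first, so it is not the tripping thread
                } catch (BrokenBarrierException expected) {
                    System.out.println("waiter: barrier broken");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            waiter.start();
            Thread.sleep(100);                        // makes it very likely that main trips the barrier
            try {
                barrier.await();                      // trips the barrier and runs the failing action
            } catch (RuntimeException e) {
                System.out.println("tripper: " + e.getMessage());
            }
            waiter.join();
        }
    }
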
--- a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java Sun Feb 17 09:54:08 2019 -0500
@@ -445,8 +445,7 @@
* if to its current value). This would be extremely costly. So
* we relax it in several ways: (1) Producers only signal when
* their queue is possibly empty at some point during a push
- * operation (which requires conservatively checking size zero or
- * one to cover races). (2) Other workers propagate this signal
+ * operation. (2) Other workers propagate this signal
* when they find tasks in a queue with size greater than one. (3)
* Workers only enqueue after scanning (see below) and not finding
* any tasks. (4) Rather than CASing ctl to its current value in
@@ -762,10 +761,8 @@
/**
* The maximum number of top-level polls per worker before
- * checking other queues, expressed as a bit shift to, in effect,
- * multiply by pool size, and then use as random value mask, so
- * average bound is about poolSize*(1<<TOP_BOUND_SHIFT). See
- * above for rationale.
+ * checking other queues, expressed as a bit shift. See above for
+ * rationale.
*/
static final int TOP_BOUND_SHIFT = 10;
@@ -841,18 +838,17 @@
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a;
- int s = top, d, cap, m;
+ int s = top, d = s - base, cap, m;
ForkJoinPool p = pool;
if ((a = array) != null && (cap = a.length) > 0) {
QA.setRelease(a, (m = cap - 1) & s, task);
top = s + 1;
- if (((d = s - (int)BASE.getAcquire(this)) & ~1) == 0 &&
- p != null) { // size 0 or 1
- VarHandle.fullFence();
- p.signalWork();
+ if (d == m)
+ growArray(false);
+ else if (QA.getAcquire(a, m & (s - 1)) == null && p != null) {
+ VarHandle.fullFence(); // was empty
+ p.signalWork(null);
}
- else if (d == m)
- growArray(false);
}
}
@@ -863,16 +859,16 @@
final boolean lockedPush(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a;
boolean signal = false;
- int s = top, b = base, cap, d;
+ int s = top, d = s - base, cap, m;
if ((a = array) != null && (cap = a.length) > 0) {
- a[(cap - 1) & s] = task;
+ a[(m = (cap - 1)) & s] = task;
top = s + 1;
- if (b - s + cap - 1 == 0)
+ if (d == m)
growArray(true);
else {
phase = 0; // full volatile unlock
- if (((s - base) & ~1) == 0) // size 0 or 1
- signal = true;
+ if (a[m & (s - 1)] == null)
+ signal = true; // was empty
}
}
return signal;
@@ -1014,25 +1010,30 @@
* queue, up to bound n (to avoid infinite unfairness).
*/
final void topLevelExec(ForkJoinTask<?> t, WorkQueue q, int n) {
- if (t != null && q != null) { // hoist checks
- int nstolen = 1;
- for (;;) {
+ int nstolen = 1;
+ for (int j = 0;;) {
+ if (t != null)
t.doExec();
- if (n-- < 0)
+ if (j++ <= n)
+ t = nextLocalTask();
+ else {
+ j = 0;
+ t = null;
+ }
+ if (t == null) {
+ if (q != null && (t = q.poll()) != null) {
+ ++nstolen;
+ j = 0;
+ }
+ else if (j != 0)
break;
- else if ((t = nextLocalTask()) == null) {
- if ((t = q.poll()) == null)
- break;
- else
- ++nstolen;
- }
}
- ForkJoinWorkerThread thread = owner;
- nsteals += nstolen;
- source = 0;
- if (thread != null)
- thread.afterTopLevelExec();
}
+ ForkJoinWorkerThread thread = owner;
+ nsteals += nstolen;
+ source = 0;
+ if (thread != null)
+ thread.afterTopLevelExec();
}
/**
@@ -1455,7 +1456,7 @@
if (!tryTerminate(false, false) && // possibly replace worker
w != null && w.array != null) // avoid repeated failures
- signalWork();
+ signalWork(null);
if (ex == null) // help clean on way out
ForkJoinTask.helpExpungeStaleExceptions();
@@ -1465,8 +1466,9 @@
/**
* Tries to create or release a worker if too few are running.
+ * @param q if non-null recheck if empty on CAS failure
*/
- final void signalWork() {
+ final void signalWork(WorkQueue q) {
for (;;) {
long c; int sp; WorkQueue[] ws; int i; WorkQueue v;
if ((c = ctl) >= 0L) // enough workers
@@ -1493,6 +1495,8 @@
LockSupport.unpark(vt);
break;
}
+ else if (q != null && q.isEmpty()) // no need to retry
+ break;
}
}
}
@@ -1613,19 +1617,24 @@
else if (rc <= 0 && (md & SHUTDOWN) != 0 &&
tryTerminate(false, false))
break; // quiescent shutdown
- else if (rc <= 0 && pred != 0 && phase == (int)c) {
- long nc = (UC_MASK & (c - TC_UNIT)) | (SP_MASK & pred);
- long d = keepAlive + System.currentTimeMillis();
- LockSupport.parkUntil(this, d);
- if (ctl == c && // drop on timeout if all idle
- d - System.currentTimeMillis() <= TIMEOUT_SLOP &&
- CTL.compareAndSet(this, c, nc)) {
- w.phase = QUIET;
- break;
+ else if (w.phase < 0) {
+ if (rc <= 0 && pred != 0 && phase == (int)c) {
+ long nc = (UC_MASK & (c - TC_UNIT)) | (SP_MASK & pred);
+ long d = keepAlive + System.currentTimeMillis();
+ LockSupport.parkUntil(this, d);
+ if (ctl == c && // drop on timeout if all idle
+ d - System.currentTimeMillis() <= TIMEOUT_SLOP &&
+ CTL.compareAndSet(this, c, nc)) {
+ w.phase = QUIET;
+ break;
+ }
+ }
+ else {
+ LockSupport.park(this);
+ if (w.phase < 0) // one spurious wakeup check
+ LockSupport.park(this);
}
}
- else if (w.phase < 0)
- LockSupport.park(this); // OK if spuriously woken
w.source = 0; // disable signal
}
}
@@ -1651,10 +1660,10 @@
QA.compareAndSet(a, k, t, null)) {
q.base = b;
w.source = qid;
- if (q.top - b > 0)
- signalWork();
+ if (a[(cap - 1) & b] != null)
+ signalWork(q); // help signal if more tasks
w.topLevelExec(t, q, // random fairness bound
- r & ((n << TOP_BOUND_SHIFT) - 1));
+ (r | (1 << TOP_BOUND_SHIFT)) & SMASK);
}
}
return true;
@@ -1900,7 +1909,7 @@
r = ThreadLocalRandom.advanceProbe(r);
else {
if (q.lockedPush(task))
- signalWork();
+ signalWork(null);
return;
}
}
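
None of the WorkQueue and signalWork changes above are visible through the public API: push() and lockedPush() now signal only when the slot below the new top was empty (the queue looked empty), signalWork() takes the queue so a racing signaller can stop retrying once it is empty again, and topLevelExec() interleaves local and stolen tasks under the revised fairness bound. The paths they sit on are driven by ordinary fork/join code; fork() from a worker lands in push(), and an external submit/invoke lands in lockedPush() of a shared submission queue, as in this standard sketch:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    public class SumTask extends RecursiveTask<Long> {
        private final long[] data;
        private final int from, to;

        SumTask(long[] data, int from, int to) {
            this.data = data; this.from = from; this.to = to;
        }

        @Override
        protected Long compute() {
            if (to - from <= 1_000) {                 // small slice: sum directly
                long s = 0;
                for (int i = from; i < to; i++) s += data[i];
                return s;
            }
            int mid = (from + to) >>> 1;
            SumTask left = new SumTask(data, from, mid);
            left.fork();                              // worker-side push()
            long rightSum = new SumTask(data, mid, to).compute();
            return rightSum + left.join();
        }

        public static void main(String[] args) {
            long[] data = new long[1_000_000];
            java.util.Arrays.fill(data, 1L);
            // External submission: goes through a shared submission queue (lockedPush).
            long sum = ForkJoinPool.commonPool().invoke(new SumTask(data, 0, data.length));
            System.out.println(sum);                  // 1000000
        }
    }
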
--- a/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java Sun Feb 17 09:54:08 2019 -0500
@@ -236,7 +236,8 @@
@Override // paranoically
public void setContextClassLoader(ClassLoader cl) {
- throw new SecurityException("setContextClassLoader");
+ if (cl != null && ClassLoader.getSystemClassLoader() != cl)
+ throw new SecurityException("setContextClassLoader");
}
}
}
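
This override lives on the pool's restricted ("innocuous") worker threads, used for instance when a security manager is installed; it previously rejected every setContextClassLoader call and now tolerates re-setting the loader to the system class loader. The sketch below only shows the call shape; whether the SecurityException branch is ever taken depends on which worker-thread factory the common pool is actually using in a given environment.

    import java.util.concurrent.ForkJoinPool;

    public class ContextLoaderDemo {
        public static void main(String[] args) {
            ForkJoinPool.commonPool().submit(() -> {
                Thread t = Thread.currentThread();
                try {
                    // Re-setting to the system class loader is now allowed even on the
                    // restricted worker threads; any other loader still throws.
                    t.setContextClassLoader(ClassLoader.getSystemClassLoader());
                    System.out.println("context class loader reset on " + t.getName());
                } catch (SecurityException e) {
                    System.out.println("rejected: " + e.getMessage());
                }
            }).join();
        }
    }
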
--- a/src/java.base/share/classes/sun/security/ssl/Alert.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.base/share/classes/sun/security/ssl/Alert.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,7 @@
HANDSHAKE_FAILURE ((byte)40, "handshake_failure", true),
NO_CERTIFICATE ((byte)41, "no_certificate", true),
BAD_CERTIFICATE ((byte)42, "bad_certificate", true),
- UNSUPPORTED_CERTIFCATE ((byte)43, "unsupported_certificate", true),
+ UNSUPPORTED_CERTIFICATE ((byte)43, "unsupported_certificate", true),
CERTIFICATE_REVOKED ((byte)44, "certificate_revoked", true),
CERTIFICATE_EXPIRED ((byte)45, "certificate_expired", true),
CERTIFICATE_UNKNOWN ((byte)46, "certificate_unknown", true),
--- a/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -717,6 +717,13 @@
alert = chc.staplingActive ?
Alert.BAD_CERT_STATUS_RESPONSE :
Alert.CERTIFICATE_UNKNOWN;
+ } else if (reason == BasicReason.ALGORITHM_CONSTRAINED) {
+ alert = Alert.UNSUPPORTED_CERTIFICATE;
+ } else if (reason == BasicReason.EXPIRED) {
+ alert = Alert.CERTIFICATE_EXPIRED;
+ } else if (reason == BasicReason.INVALID_SIGNATURE ||
+ reason == BasicReason.NOT_YET_VALID) {
+ alert = Alert.BAD_CERTIFICATE;
}
}
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/OutputPropertiesFactory.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/OutputPropertiesFactory.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -20,15 +20,6 @@
package com.sun.org.apache.xml.internal.serializer;
-import com.sun.org.apache.xml.internal.serializer.utils.MsgKey;
-import com.sun.org.apache.xml.internal.serializer.utils.Utils;
-import com.sun.org.apache.xml.internal.serializer.utils.WrappedRuntimeException;
-import java.io.BufferedInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.Enumeration;
import java.util.Properties;
import javax.xml.transform.OutputKeys;
import jdk.xml.internal.SecuritySupport;
@@ -68,18 +59,18 @@
* of the corresponding character, like this one: <br> quot=34 <br>
*
* <li> <b>S_USE_URL_ESCAPING </b> -
- * This non-standard property key is used to set a value of "yes" if the href values for HTML serialization should
- * use %xx escaping.
+ * This non-standard property key is used to set a value of "yes" if the href values
+ * for HTML serialization should use %xx escaping.
*
* <li> <b>S_OMIT_META_TAG </b> -
- * This non-standard property key is used to set a value of "yes" if the META tag should be omitted where it would
- * otherwise be supplied.
+ * This non-standard property key is used to set a value of "yes" if the META tag
+ * should be omitted where it would otherwise be supplied.
* </ul>
*
* @see SerializerFactory
* @see Method
* @see Serializer
- * @LastModified: Oct 2017
+ * @LastModified: Feb 2019
*/
public final class OutputPropertiesFactory
{
@@ -147,14 +138,15 @@
S_BUILTIN_EXTENSIONS_UNIVERSAL + "entities";
/**
- * This non-standard property key is used to set a value of "yes" if the href values for HTML serialization should
- * use %xx escaping. */
+ * This non-standard property key is used to set a value of "yes" if the href
+ * values for HTML serialization should use %xx escaping.
+ */
public static final String S_USE_URL_ESCAPING =
S_BUILTIN_EXTENSIONS_UNIVERSAL + "use-url-escaping";
/**
- * This non-standard property key is used to set a value of "yes" if the META tag should be omitted where it would
- * otherwise be supplied.
+ * This non-standard property key is used to set a value of "yes" if the META
+ * tag should be omitted where it would otherwise be supplied.
*/
public static final String S_OMIT_META_TAG =
S_BUILTIN_EXTENSIONS_UNIVERSAL + "omit-meta-tag";
@@ -174,53 +166,133 @@
S_BUILTIN_OLD_EXTENSIONS_UNIVERSAL.length();
/**
- * This non-standard, Oracle-impl only property key is used as if OutputKeys.STANDALONE is specified but
- * without writing it out in the declaration; It can be used to reverse the change by Xalan patch 1495.
- * Since Xalan patch 1495 can cause incompatible behavior, this property is add for application to neutralize
- * the effect of Xalan patch 1495
+ * This non-standard, Oracle-impl only property key is used as if
+ * OutputKeys.STANDALONE is specified but without writing it out in the declaration.
+ * It can be used to reverse the change made by Xalan patch 1495.
+ * Since Xalan patch 1495 can cause incompatible behavior, this property is
+ * added so applications can neutralize the effect of Xalan patch 1495.
*/
- /**
- * <p>Is Standalone</p>
- *
- * <ul>
- * <li>
- * <code>yes</code> to indicate the output is intended to be used as standalone
- * </li>
- * <li>
- * <code>no</code> has no effect.
- * </li>
- * </ul>
- */
+ /**
+ * <p>Is Standalone</p>
+ *
+ * <ul>
+ * <li>
+ * <code>yes</code> to indicate the output is intended to be used as standalone
+ * </li>
+ * <li>
+ * <code>no</code> has no effect.
+ * </li>
+ * </ul>
+ */
public static final String ORACLE_IS_STANDALONE = "http://www.oracle.com/xml/is-standalone";
//************************************************************
//* PRIVATE CONSTANTS
//************************************************************
- private static final String S_XSLT_PREFIX = "xslt.output.";
- private static final int S_XSLT_PREFIX_LEN = S_XSLT_PREFIX.length();
- private static final String S_XALAN_PREFIX = "org.apache.xslt.";
- private static final int S_XALAN_PREFIX_LEN = S_XALAN_PREFIX.length();
+ /*
+ * XSLT properties do not need namespace qualification.
+ *
+ * Xalan-specific output properties can be overridden in the stylesheet
+ * assigning a xalan namespace. For example:
+ * <xsl:stylesheet version="1.0"
+ * xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ * xmlns:xalan="http://xml.apache.org/xalan">
+ * <xsl:output method="html" encoding="UTF-8"
+ * xalan:content-handler="MyContentHandler"/>
+ * ...
+ */
+ private static final String[] PROP_XML = {
+ "method",
+ "version",
+ "encoding",
+ "indent",
+ "omit-xml-declaration",
+ "standalone",
+ "media-type",
+ "{http://xml.apache.org/xalan}indent-amount",
+ "{http://xml.apache.org/xalan}content-handler",
+ "{http://xml.apache.org/xalan}entities"
+ };
- /** Synchronization object for lazy initialization of the above tables. */
- private static final Object m_synch_object = new Object();
+ private static final String[] PROP_XML_VALUE = {
+ "xml",
+ "1.0",
+ "UTF-8",
+ "no",
+ "no",
+ "no",
+ "text/xml",
+ "0",
+ "com.sun.org.apache.xml.internal.serializer.ToXMLStream",
+ "com/sun/org/apache/xml/internal/serializer/XMLEntities"
+ };
- /** the directory in which the various method property files are located */
- private static final String PROP_DIR = "com/sun/org/apache/xml/internal/serializer/";
- /** property file for default XML properties */
- private static final String PROP_FILE_XML = "output_xml.properties";
- /** property file for default TEXT properties */
- private static final String PROP_FILE_TEXT = "output_text.properties";
- /** property file for default HTML properties */
- private static final String PROP_FILE_HTML = "output_html.properties";
- /** property file for default UNKNOWN (Either XML or HTML, to be determined later) properties */
- private static final String PROP_FILE_UNKNOWN = "output_unknown.properties";
+ private static final String[] PROP_HTML = {
+ "method",
+ "indent",
+ "media",
+ "version",
+ "{http://xml.apache.org/xalan}indent-amount",
+ "{http://xml.apache.org/xalan}content-handler",
+ "{http://xml.apache.org/xalan}entities",
+ "{http://xml.apache.org/xalan}use-url-escaping",
+ "{http://xml.apache.org/xalan}omit-meta-tag"
+ };
+
+ private static final String[] PROP_HTML_VALUE = {
+ "html",
+ "yes",
+ "text/html",
+ "4.0",
+ "4",
+ "com.sun.org.apache.xml.internal.serializer.ToHTMLStream",
+ "com/sun/org/apache/xml/internal/serializer/HTMLEntities",
+ "yes",
+ "no"
+ };
+
+ private static final String[] PROP_TEXT = {
+ "method",
+ "media-type",
+ "{http://xml.apache.org/xalan}content-handler"
+ };
+
+ private static final String[] PROP_TEXT_VALUE = {
+ "text",
+ "text/plain",
+ "com.sun.org.apache.xml.internal.serializer.ToTextStream"
+ };
+
+ private static final String[] PROP_UNKNOWN = {
+ "method",
+ "version",
+ "encoding",
+ "indent",
+ "omit-xml-declaration",
+ "standalone",
+ "media-type",
+ "{http://xml.apache.org/xalan}indent-amount",
+ "{http://xml.apache.org/xalan}content-handler"
+ };
+
+ private static final String[] PROP_UNKNOWN_VALUE = {
+ "xml",
+ "1.0",
+ "UTF-8",
+ "no",
+ "no",
+ "no",
+ "text/xml",
+ "0",
+ "com.sun.org.apache.xml.internal.serializer.ToUnknownStream",
+ };
//************************************************************
//* PRIVATE STATIC FIELDS
//************************************************************
- /** The default properties of all output files. */
+ /** The default properties for all other than html and text. */
private static Properties m_xml_properties = null;
/** The default properties when method="html". */
@@ -232,38 +304,8 @@
/** The properties when method="" for the "unknown" wrapper */
private static Properties m_unknown_properties = null;
- private static final Class<?>
- ACCESS_CONTROLLER_CLASS = findAccessControllerClass();
-
- private static Class<?> findAccessControllerClass() {
- try
- {
- // This Class was introduced in JDK 1.2. With the re-architecture of
- // security mechanism ( starting in JDK 1.2 ), we have option of
- // giving privileges to certain part of code using doPrivileged block.
- // In JDK1.1.X applications won't be having security manager and if
- // there is security manager ( in applets ), code need to be signed
- // and trusted for having access to resources.
-
- return Class.forName("java.security.AccessController");
- }
- catch (Exception e)
- {
- //User may be using older JDK ( JDK <1.2 ). Allow him/her to use it.
- // But don't try to use doPrivileged
- }
-
- return null;
- }
-
/**
- * Creates an empty OutputProperties with the property key/value defaults specified by
- * a property file. The method argument is used to construct a string of
- * the form output_[method].properties (for instance, output_html.properties).
- * The output_xml.properties file is always used as the base.
- *
- * <p>Anything other than 'text', 'xml', and 'html', will
- * use the output_xml.properties file.</p>
+ * Returns a Properties based on the specified method. The default is xml.
*
* @param method non-null reference to method name.
*
@@ -271,265 +313,71 @@
*/
static public final Properties getDefaultMethodProperties(String method)
{
- String fileName = null;
Properties defaultProperties = null;
- // According to this article : Double-check locking does not work
- // http://www.javaworld.com/javaworld/jw-02-2001/jw-0209-toolbox.html
- try
- {
- synchronized (m_synch_object)
- {
- if (null == m_xml_properties) // double check
- {
- fileName = PROP_FILE_XML;
- m_xml_properties = loadPropertiesFile(fileName, null);
- }
- }
- if (method.equals(Method.XML))
- {
+ if (null == m_xml_properties) {
+ m_xml_properties = initProperties(PROP_XML, PROP_XML_VALUE, null);
+ }
+
+
+ switch (method) {
+ case Method.XML:
defaultProperties = m_xml_properties;
- }
- else if (method.equals(Method.HTML))
- {
- if (null == m_html_properties) // double check
- {
- fileName = PROP_FILE_HTML;
- m_html_properties =
- loadPropertiesFile(fileName, m_xml_properties);
+ break;
+ case Method.HTML:
+ if (null == m_html_properties) {
+ m_html_properties = initProperties(
+ PROP_HTML, PROP_HTML_VALUE, m_xml_properties);
}
-
defaultProperties = m_html_properties;
- }
- else if (method.equals(Method.TEXT))
- {
- if (null == m_text_properties) // double check
- {
- fileName = PROP_FILE_TEXT;
- m_text_properties =
- loadPropertiesFile(fileName, m_xml_properties);
- if (null
- == m_text_properties.getProperty(OutputKeys.ENCODING))
+ break;
+ case Method.TEXT:
+ if (null == m_text_properties) {
+ m_text_properties = initProperties(
+ PROP_TEXT, PROP_TEXT_VALUE, m_xml_properties);
+
+ if (null == m_text_properties.getProperty(OutputKeys.ENCODING))
{
String mimeEncoding = Encodings.getMimeEncoding(null);
- m_text_properties.put(
- OutputKeys.ENCODING,
- mimeEncoding);
+ m_text_properties.put(OutputKeys.ENCODING, mimeEncoding);
}
}
-
defaultProperties = m_text_properties;
- }
- else if (method.equals(com.sun.org.apache.xml.internal.serializer.Method.UNKNOWN))
- {
- if (null == m_unknown_properties) // double check
- {
- fileName = PROP_FILE_UNKNOWN;
- m_unknown_properties =
- loadPropertiesFile(fileName, m_xml_properties);
+ break;
+ case com.sun.org.apache.xml.internal.serializer.Method.UNKNOWN:
+ if (null == m_unknown_properties) {
+ m_unknown_properties = initProperties(
+ PROP_UNKNOWN, PROP_UNKNOWN_VALUE, m_xml_properties);
}
-
defaultProperties = m_unknown_properties;
- }
- else
- {
- // TODO: Calculate res file from name.
+ break;
+ default:
defaultProperties = m_xml_properties;
- }
+ break;
}
- catch (IOException ioe)
- {
- throw new WrappedRuntimeException(
- Utils.messages.createMessage(
- MsgKey.ER_COULD_NOT_LOAD_METHOD_PROPERTY,
- new Object[] { fileName, method }),
- ioe);
- }
+
// wrap these cached defaultProperties in a new Property object just so
// that the caller of this method can't modify the default values
return new Properties(defaultProperties);
}
/**
- * Load the properties file from a resource stream. If a
- * key name such as "org.apache.xslt.xxx", fix up the start of
- * string to be a curly namespace. If a key name starts with
- * "xslt.output.xxx", clip off "xslt.output.". If a key name *or* a
- * key value is discovered, check for \u003a in the text, and
- * fix it up to be ":", since earlier versions of the JDK do not
- * handle the escape sequence (at least in key names).
+ * Initializes the properties.
*
- * @param resourceName non-null reference to resource name.
+ * @param keys an array of keys
+ * @param values values corresponding to the keys
* @param defaults Default properties, which may be null.
*/
- static private Properties loadPropertiesFile(
- final String resourceName,
- Properties defaults)
- throws IOException
+ static private Properties initProperties(String[] keys, String[] values, Properties defaults)
{
-
- // This static method should eventually be moved to a thread-specific class
- // so that we can cache the ContextClassLoader and bottleneck all properties file
- // loading throughout Xalan.
-
Properties props = new Properties(defaults);
- InputStream is = null;
- BufferedInputStream bis = null;
-
- try
- {
- if (ACCESS_CONTROLLER_CLASS != null)
- {
- is = AccessController.doPrivileged(new PrivilegedAction<InputStream>() {
- public InputStream run()
- {
- return OutputPropertiesFactory.class
- .getResourceAsStream(resourceName);
- }
- });
- }
- else
- {
- // User may be using older JDK ( JDK < 1.2 )
- is = OutputPropertiesFactory.class
- .getResourceAsStream(resourceName);
- }
-
- bis = new BufferedInputStream(is);
- props.load(bis);
- }
- catch (IOException ioe)
- {
- if (defaults == null)
- {
- throw ioe;
- }
- else
- {
- throw new WrappedRuntimeException(
- Utils.messages.createMessage(
- MsgKey.ER_COULD_NOT_LOAD_RESOURCE,
- new Object[] { resourceName }),
- ioe);
- //"Could not load '"+resourceName+"' (check CLASSPATH), now using just the defaults ", ioe);
- }
- }
- catch (SecurityException se)
- {
- // Repeat IOException handling for sandbox/applet case -sc
- if (defaults == null)
- {
- throw se;
- }
- else
- {
- throw new WrappedRuntimeException(
- Utils.messages.createMessage(
- MsgKey.ER_COULD_NOT_LOAD_RESOURCE,
- new Object[] { resourceName }),
- se);
- //"Could not load '"+resourceName+"' (check CLASSPATH, applet security), now using just the defaults ", se);
- }
- }
- finally
- {
- if (bis != null)
- {
- bis.close();
- }
- if (is != null)
- {
- is.close();
- }
- }
-
- // Note that we're working at the HashTable level here,
- // and not at the Properties level! This is important
- // because we don't want to modify the default properties.
- // NB: If fixupPropertyString ends up changing the property
- // name or value, we need to remove the old key and re-add
- // with the new key and value. However, then our Enumeration
- // could lose its place in the HashTable. So, we first
- // clone the HashTable and enumerate over that since the
- // clone will not change. When we migrate to Collections,
- // this code should be revisited and cleaned up to use
- // an Iterator which may (or may not) alleviate the need for
- // the clone. Many thanks to Padraig O'hIceadha
- // <padraig@gradient.ie> for finding this problem. Bugzilla 2000.
-
- Enumeration<Object> keys = ((Properties) props.clone()).keys();
- while (keys.hasMoreElements())
- {
- String key = (String) keys.nextElement();
- // Now check if the given key was specified as a
- // System property. If so, the system property
- // overides the default value in the propery file.
- String value = null;
- try
- {
- value = SecuritySupport.getSystemProperty(key);
- }
- catch (SecurityException se)
- {
- // No-op for sandbox/applet case, leave null -sc
- }
- if (value == null)
- value = (String) props.get(key);
-
- String newKey = fixupPropertyString(key, true);
- String newValue = null;
- try
- {
- newValue = SecuritySupport.getSystemProperty(newKey);
- }
- catch (SecurityException se)
- {
- // No-op for sandbox/applet case, leave null -sc
- }
- if (newValue == null)
- newValue = fixupPropertyString(value, false);
- else
- newValue = fixupPropertyString(newValue, false);
-
- if (key != newKey || value != newValue)
- {
- props.remove(key);
- props.put(newKey, newValue);
- }
-
+ for (int i = 0; i < keys.length; i++) {
+ // check System Property. This is kept as is for binary compatibility
+ String sys = SecuritySupport.getSystemProperty(keys[i]);
+ props.put(keys[i], (sys == null) ? values[i] : sys);
}
return props;
}
-
- /**
- * Fix up a string in an output properties file according to
- * the rules of {@link #loadPropertiesFile}.
- *
- * @param s non-null reference to string that may need to be fixed up.
- * @return A new string if fixup occured, otherwise the s argument.
- */
- static private String fixupPropertyString(String s, boolean doClipping)
- {
- int index;
- if (doClipping && s.startsWith(S_XSLT_PREFIX))
- {
- s = s.substring(S_XSLT_PREFIX_LEN);
- }
- if (s.startsWith(S_XALAN_PREFIX))
- {
- s =
- S_BUILTIN_EXTENSIONS_UNIVERSAL
- + s.substring(S_XALAN_PREFIX_LEN);
- }
- if ((index = s.indexOf("\\u003a")) > 0)
- {
- String temp = s.substring(index + 6);
- s = s.substring(0, index) + ":" + temp;
-
- }
- return s;
- }
-
}
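
The rewrite above removes the output_*.properties resources (deleted next) together with the AccessController and resource-loading machinery, and instead seeds the same key/value defaults from the PROP_* arrays; getDefaultMethodProperties() still lets a system property of the same name override a default and still hands back a fresh Properties wrapper so callers cannot mutate the cached tables. From application code the defaults are only visible through JAXP, for example via an identity transformer; the values in the comments are what the JDK's built-in implementation is expected to report.

    import java.util.Properties;
    import javax.xml.transform.OutputKeys;
    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerFactory;

    public class DefaultOutputProps {
        public static void main(String[] args) throws Exception {
            Transformer t = TransformerFactory.newInstance().newTransformer();
            Properties p = t.getOutputProperties();   // starts from the "xml" defaults
            System.out.println(p.getProperty(OutputKeys.METHOD));      // xml
            System.out.println(p.getProperty(OutputKeys.ENCODING));    // UTF-8
            System.out.println(p.getProperty(OutputKeys.INDENT));      // no
            System.out.println(p.getProperty(OutputKeys.MEDIA_TYPE));  // text/xml
        }
    }
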
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/output_html.properties Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-###########################################################################
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-###########################################################################
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##
-#
-# Specify defaults when method="html". These defaults use output_xml.properties
-# as a base.
-#
-
-# XSLT properties do not need namespace qualification.
-method=html
-indent=yes
-media-type=text/html
-version=4.0
-
-# Xalan-specific output properties. These can be overridden in the stylesheet
-# assigning a xalan namespace. For example:
-# <xsl:stylesheet version="1.0"
-# xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-# xmlns:xalan="http://xml.apache.org/xalan">
-# <xsl:output method="html" encoding="UTF-8"
-# xalan:content-handler="MyContentHandler"/>
-# ...
-# Note that the colon after the protocol needs to be escaped.
-{http\u003a//xml.apache.org/xalan}indent-amount=4
-{http\u003a//xml.apache.org/xalan}content-handler=com.sun.org.apache.xml.internal.serializer.ToHTMLStream
-{http\u003a//xml.apache.org/xalan}entities=com/sun/org/apache/xml/internal/serializer/HTMLEntities
-{http\u003a//xml.apache.org/xalan}use-url-escaping=yes
-{http\u003a//xml.apache.org/xalan}omit-meta-tag=no
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/output_text.properties Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-###########################################################################
-# reserved comment block
-# DO NOT REMOVE OR ALTER!
-###########################################################################
-##########################################################################
-# Copyright 2003-2004 The Apache Software Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-#
-# $Id: output_text.properties,v 1.3 2005/09/28 13:49:10 pvedula Exp $
-#
-# Specify defaults when method="text".
-#
-
-# XSLT properties do not need namespace qualification.
-method=text
-media-type=text/plain
-
-# Xalan-specific output properties. These can be overridden in the stylesheet
-# assigning a xalan namespace. For example:
-# <xsl:stylesheet version="1.0"
-# xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-# xmlns:xalan="http://xml.apache.org/xalan">
-# <xsl:output method="html" encoding="UTF-8"
-# xalan:content-handler="MyContentHandler"/>
-# ...
-# Note that the colon after the protocol needs to be escaped.
-{http\u003a//xml.apache.org/xalan}content-handler=com.sun.org.apache.xml.internal.serializer.ToTextStream
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/output_unknown.properties Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-###########################################################################
-# reserved comment block
-# DO NOT REMOVE OR ALTER!
-###########################################################################
-###########################################################################
-# Copyright 2003-2004 The Apache Software Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-#
-# $Id: output_unknown.properties,v 1.2.4.1 2005/09/15 08:15:33 suresh_emailid Exp $
-#
-# Specify defaults when no method="..." is specified.
-# This type of output will quickly switch to "xml" or "html"
-# depending on the first element name.
-#
-
-# XSLT properties do not need namespace qualification.
-method=xml
-version=1.0
-encoding=UTF-8
-indent=no
-omit-xml-declaration=no
-standalone=no
-media-type=text/xml
-
-# Xalan-specific output properties. These can be overridden in the stylesheet
-# assigning a xalan namespace. For example:
-# <xsl:stylesheet version="1.0"
-# xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-# xmlns:xalan="http://xml.apache.org/xalan">
-# <xsl:output method="html" encoding="UTF-8"
-# xalan:content-handler="MyContentHandler"/>
-# ...
-# Note that the colon after the protocol needs to be escaped.
-{http\u003a//xml.apache.org/xalan}indent-amount=0
-{http\u003a//xml.apache.org/xalan}content-handler=com.sun.org.apache.xml.internal.serializer.ToUnknownStream
-
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/output_xml.properties Fri Feb 15 17:41:06 2019 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-###########################################################################
-# reserved comment block
-# DO NOT REMOVE OR ALTER!
-###########################################################################
-###########################################################################
-# Copyright 2003-2004 The Apache Software Foundation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-##########################################################################
-#
-# $Id: output_xml.properties,v 1.2.4.1 2005/09/15 08:15:33 suresh_emailid Exp $
-#
-# Specify defaults when method="xml". These defaults serve as a base for
-# other defaults, such as output_html and output_text.
-#
-
-# XSLT properties do not need namespace qualification.
-method=xml
-version=1.0
-encoding=UTF-8
-indent=no
-omit-xml-declaration=no
-standalone=no
-media-type=text/xml
-
-# Xalan-specific output properties. These can be overridden in the stylesheet
-# assigning a xalan namespace. For example:
-# <xsl:stylesheet version="1.0"
-# xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-# xmlns:xalan="http://xml.apache.org/xalan">
-# <xsl:output method="html" encoding="UTF-8"
-# xalan:content-handler="MyContentHandler"/>
-# ...
-# Note that the colon after the protocol needs to be escaped.
-{http\u003a//xml.apache.org/xalan}indent-amount=0
-{http\u003a//xml.apache.org/xalan}content-handler=com.sun.org.apache.xml.internal.serializer.ToXMLStream
-{http\u003a//xml.apache.org/xalan}entities=com/sun/org/apache/xml/internal/serializer/XMLEntities
-
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/api/JavacTrees.java Sun Feb 17 09:54:08 2019 -0500
@@ -1152,7 +1152,7 @@
try {
switch (kind) {
case ERROR:
- log.error(DiagnosticFlag.MULTIPLE, pos, Errors.ProcMessager(msg.toString()));
+ log.error(DiagnosticFlag.API, pos, Errors.ProcMessager(msg.toString()));
break;
case WARNING:
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java Sun Feb 17 09:54:08 2019 -0500
@@ -1014,7 +1014,11 @@
* Parses a list of files.
*/
public List<JCCompilationUnit> parseFiles(Iterable<JavaFileObject> fileObjects) {
- if (shouldStop(CompileState.PARSE))
+ return parseFiles(fileObjects, false);
+ }
+
+ public List<JCCompilationUnit> parseFiles(Iterable<JavaFileObject> fileObjects, boolean force) {
+ if (!force && shouldStop(CompileState.PARSE))
return List.nil();
//parse all files
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
import java.io.PrintWriter;
import java.lang.module.ModuleDescriptor;
import java.nio.file.Files;
+import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.Collator;
@@ -752,14 +753,18 @@
@Override
public void process(OptionHelper helper, String option) throws InvalidValueException {
if (option.endsWith(".java") ) {
- Path p = Paths.get(option);
- if (!Files.exists(p)) {
- throw helper.newInvalidValueException(Errors.FileNotFound(p.toString()));
+ try {
+ Path p = Paths.get(option);
+ if (!Files.exists(p)) {
+ throw helper.newInvalidValueException(Errors.FileNotFound(p.toString()));
+ }
+ if (!Files.isRegularFile(p)) {
+ throw helper.newInvalidValueException(Errors.FileNotFile(p));
+ }
+ helper.addFile(p);
+ } catch (InvalidPathException ex) {
+ throw helper.newInvalidValueException(Errors.InvalidPath(option));
}
- if (!Files.isRegularFile(p)) {
- throw helper.newInvalidValueException(Errors.FileNotFile(p));
- }
- helper.addFile(p);
} else {
helper.addClassName(option);
}
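
The new try/catch converts an InvalidPathException from Paths.get(option) into a regular compiler error (the compiler.err.invalid.path message added below, "Invalid filename: {0}") instead of letting the exception escape the option parser. One way to provoke it without a shell is the javax.tools API; a NUL byte is rejected by Paths.get on the common platforms, though exactly which characters are invalid is platform-specific, so treat the trigger string as an assumption.

    import javax.tools.JavaCompiler;
    import javax.tools.ToolProvider;

    public class InvalidPathDemo {
        public static void main(String[] args) {
            JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
            // An argument ending in .java whose path cannot be represented: with this fix
            // javac reports "error: Invalid filename: ..." and returns a non-zero code
            // rather than aborting with an InvalidPathException stack trace.
            int rc = javac.run(null, null, null, "bad\0name.java");
            System.out.println("javac exit code: " + rc);
        }
    }
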
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacMessager.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacMessager.java Sun Feb 17 09:54:08 2019 -0500
@@ -34,6 +34,7 @@
import com.sun.tools.javac.util.JCDiagnostic.DiagnosticFlag;
import com.sun.tools.javac.tree.JCTree;
import com.sun.tools.javac.tree.JCTree.*;
+import java.util.Set;
import javax.lang.model.element.*;
import javax.tools.JavaFileObject;
import javax.tools.Diagnostic;
@@ -117,7 +118,7 @@
switch (kind) {
case ERROR:
errorCount++;
- log.error(DiagnosticFlag.MULTIPLE, pos, Errors.ProcMessager(msg.toString()));
+ log.error(DiagnosticFlag.API, pos, Errors.ProcMessager(msg.toString()));
break;
case WARNING:
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java Sun Feb 17 09:54:08 2019 -0500
@@ -35,6 +35,7 @@
import java.nio.file.Path;
import java.util.*;
import java.util.Map.Entry;
+import java.util.function.Predicate;
import java.util.regex.*;
import java.util.stream.Collectors;
@@ -83,6 +84,7 @@
import com.sun.tools.javac.util.DefinedBy.Api;
import com.sun.tools.javac.util.Iterators;
import com.sun.tools.javac.util.JCDiagnostic;
+import com.sun.tools.javac.util.JCDiagnostic.DiagnosticFlag;
import com.sun.tools.javac.util.JavacMessages;
import com.sun.tools.javac.util.List;
import com.sun.tools.javac.util.Log;
@@ -1066,7 +1068,9 @@
prev.newRound();
this.genClassFiles = prev.genClassFiles;
- List<JCCompilationUnit> parsedFiles = compiler.parseFiles(newSourceFiles);
+ //parse the generated files despite any errors reported so far, to eliminate
+ //recoverable errors related to the types declared in the generated files:
+ List<JCCompilationUnit> parsedFiles = compiler.parseFiles(newSourceFiles, true);
roots = prev.roots.appendList(parsedFiles);
// Check for errors after parsing
@@ -1233,15 +1237,17 @@
}
void showDiagnostics(boolean showAll) {
- Set<JCDiagnostic.Kind> kinds = EnumSet.allOf(JCDiagnostic.Kind.class);
- if (!showAll) {
- // suppress errors, which are all presumed to be transient resolve errors
- kinds.remove(JCDiagnostic.Kind.ERROR);
- }
- deferredDiagnosticHandler.reportDeferredDiagnostics(kinds);
+ deferredDiagnosticHandler.reportDeferredDiagnostics(showAll ? ACCEPT_ALL
+ : ACCEPT_NON_RECOVERABLE);
log.popDiagnosticHandler(deferredDiagnosticHandler);
compiler.setDeferredDiagnosticHandler(null);
}
+ //where:
+ private final Predicate<JCDiagnostic> ACCEPT_NON_RECOVERABLE =
+ d -> d.getKind() != JCDiagnostic.Kind.ERROR ||
+ !d.isFlagSet(DiagnosticFlag.RECOVERABLE) ||
+ d.isFlagSet(DiagnosticFlag.API);
+ private final Predicate<JCDiagnostic> ACCEPT_ALL = d -> true;
/** Print info about this round. */
private void printRoundInfo(boolean lastRound) {
@@ -1335,7 +1341,7 @@
errorStatus = round.unrecoverableError();
moreToDo = moreToDo();
- round.showDiagnostics(errorStatus || showResolveErrors);
+ round.showDiagnostics(showResolveErrors);
// Set up next round.
// Copy mutable collections returned from filer.
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Sun Feb 17 09:54:08 2019 -0500
@@ -1977,6 +1977,11 @@
compiler.warn.invalid.path=\
Invalid filename: {0}
+# 0: string
+compiler.err.invalid.path=\
+ Invalid filename: {0}
+
+
# 0: path
compiler.warn.invalid.archive.file=\
Unexpected file on path: {0}
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/util/JCDiagnostic.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/util/JCDiagnostic.java Sun Feb 17 09:54:08 2019 -0500
@@ -429,9 +429,9 @@
RECOVERABLE,
NON_DEFERRABLE,
COMPRESSED,
- /** Print multiple errors for same source locations.
+ /** Flag for diagnostics that were reported through API methods.
*/
- MULTIPLE,
+ API,
/** Flag for not-supported-in-source-X errors.
*/
SOURCE_LEVEL;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/util/Log.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/util/Log.java Sun Feb 17 09:54:08 2019 -0500
@@ -33,6 +33,7 @@
import java.util.Map;
import java.util.Queue;
import java.util.Set;
+import java.util.function.Predicate;
import javax.tools.DiagnosticListener;
import javax.tools.JavaFileObject;
@@ -158,14 +159,14 @@
/** Report all deferred diagnostics. */
public void reportDeferredDiagnostics() {
- reportDeferredDiagnostics(EnumSet.allOf(JCDiagnostic.Kind.class));
+ reportDeferredDiagnostics(d -> true);
}
/** Report selected deferred diagnostics. */
- public void reportDeferredDiagnostics(Set<JCDiagnostic.Kind> kinds) {
+ public void reportDeferredDiagnostics(Predicate<JCDiagnostic> accepter) {
JCDiagnostic d;
while ((d = deferred.poll()) != null) {
- if (kinds.contains(d.getKind()))
+ if (accepter.test(d))
prev.report(d);
}
deferred = null; // prevent accidental ongoing use
@@ -713,7 +714,7 @@
case ERROR:
if (nerrors < MaxErrors &&
- (diagnostic.isFlagSet(DiagnosticFlag.MULTIPLE) ||
+ (diagnostic.isFlagSet(DiagnosticFlag.API) ||
shouldReport(diagnostic))) {
writeDiagnostic(diagnostic);
nerrors++;
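
Taken together, the javac changes in this changeset repurpose the MULTIPLE diagnostic flag as API: errors reported through the Messager/Trees entry points bypass the one-error-per-position filter in Log, and the new Predicate-based reportDeferredDiagnostics keeps them from being discarded between annotation-processing rounds as presumed-recoverable resolve errors. A processor whose errors benefit from this is just an ordinary Messager client; the class and message below are illustrative.

    import java.util.Set;
    import javax.annotation.processing.AbstractProcessor;
    import javax.annotation.processing.RoundEnvironment;
    import javax.annotation.processing.SupportedAnnotationTypes;
    import javax.lang.model.SourceVersion;
    import javax.lang.model.element.Element;
    import javax.lang.model.element.TypeElement;
    import javax.tools.Diagnostic;

    @SupportedAnnotationTypes("*")
    public class ShoutingProcessor extends AbstractProcessor {
        @Override
        public SourceVersion getSupportedSourceVersion() {
            return SourceVersion.latest();
        }

        @Override
        public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
            for (Element e : roundEnv.getRootElements()) {
                // Reported with the API diagnostic flag, so it is emitted even when other
                // errors were already reported at the same position or deferred by rounds.
                processingEnv.getMessager().printMessage(
                        Diagnostic.Kind.ERROR, "rejected by ShoutingProcessor", e);
            }
            return false;
        }
    }

Registered with, for example, javac -processor ShoutingProcessor SomeSource.java.
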
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/BigIntegerIntrinsicsTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/BigIntegerIntrinsicsTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,22 +21,23 @@
* questions.
*/
-
package org.graalvm.compiler.hotspot.test;
+import java.lang.reflect.InvocationTargetException;
import java.math.BigInteger;
import java.util.Random;
import org.graalvm.compiler.api.test.Graal;
+import org.graalvm.compiler.core.test.GraalCompilerTest;
import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig;
import org.graalvm.compiler.hotspot.HotSpotGraalRuntimeProvider;
-import org.graalvm.compiler.replacements.test.MethodSubstitutionTest;
import org.graalvm.compiler.runtime.RuntimeProvider;
import org.junit.Test;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.InstalledCode;
+import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
@@ -52,7 +53,7 @@
* is not tested per se (only execution based on admissible intrinsics).
*
*/
-public final class BigIntegerIntrinsicsTest extends MethodSubstitutionTest {
+public final class BigIntegerIntrinsicsTest extends GraalCompilerTest {
static final int N = 100;
@@ -149,8 +150,8 @@
assertDeepEquals(res1, res2);
- // Invoke BigInteger testMontgomeryAux(BigInteger, BigExp, BigInteger) through code
- // handle.
+ // Invoke BigInteger testMontgomeryAux(BigInteger, BigExp, BigInteger)
+ // through code handle.
BigInteger res3 = (BigInteger) tin.invokeCode(big1, bigTwo, big2);
assertDeepEquals(res1, res3);
@@ -168,7 +169,6 @@
private class TestIntrinsic {
TestIntrinsic(String testmname, Class<?> javaclass, String javamname, Class<?>... params) {
-
javamethod = getResolvedJavaMethod(javaclass, javamname, params);
testmethod = getResolvedJavaMethod(testmname);
@@ -179,21 +179,39 @@
testcode = getCode(testmethod);
assert testcode != null;
+ assert testcode.isValid();
}
Object invokeJava(BigInteger big, Object... args) {
-
return invokeSafe(javamethod, big, args);
}
Object invokeTest(Object... args) {
-
return invokeSafe(testmethod, null, args);
}
Object invokeCode(Object... args) {
+ try {
+ return testcode.executeVarargs(args);
+ }
+ catch (InvalidInstalledCodeException e) {
+ // The installed code was invalidated; fetch it again (recompiling if necessary) and retry.
+ testcode = getCode(testmethod);
- return executeVarargsSafe(testcode, args);
+ assert testcode != null;
+ assert testcode.isValid();
+
+ return invokeCode(args);
+ }
+ }
+
+ private Object invokeSafe(ResolvedJavaMethod method, Object receiver, Object... args) {
+ try {
+ return invoke(method, receiver, args);
+ } catch (IllegalAccessException | InvocationTargetException |
+ IllegalArgumentException | InstantiationException e) {
+ throw new RuntimeException(e);
+ }
}
// Private data section:
@@ -202,7 +220,8 @@
private InstalledCode testcode;
}
- private static GraalHotSpotVMConfig config = ((HotSpotGraalRuntimeProvider) Graal.getRequiredCapability(RuntimeProvider.class)).getVMConfig();
+ private static GraalHotSpotVMConfig config =
+ ((HotSpotGraalRuntimeProvider) Graal.getRequiredCapability(RuntimeProvider.class)).getVMConfig();
private static BigInteger bigTwo = BigInteger.valueOf(2);
private static Random rnd = new Random(17);
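Replacing MethodSubstitutionTest.executeVarargsSafe with a direct call to InstalledCode.executeVarargs means the test has to cope with the code being invalidated between installation and invocation, which the catch block above handles by re-obtaining the code and calling itself again. A compact sketch of the same pattern with a single retry; the method name is hypothetical, while getCode is the GraalCompilerTest helper already used above.

    private Object runInstalled(ResolvedJavaMethod method, InstalledCode code, Object... args)
            throws InvalidInstalledCodeException {
        try {
            return code.executeVarargs(args);
        } catch (InvalidInstalledCodeException e) {
            // The code was invalidated (e.g. by deoptimization); reinstall and retry once.
            InstalledCode fresh = getCode(method);
            return fresh.executeVarargs(args);
        }
    }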
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StringCompressInflateTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StringCompressInflateTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -299,7 +299,7 @@
TestMethods(String testmname, Class<?> javaclass, Class<?> intrinsicClass, String javamname, Class<?>... params) {
javamethod = getResolvedJavaMethod(javaclass, javamname, params);
testmethod = getResolvedJavaMethod(testmname);
- testgraph = testGraph(testmname, javamname);
+ testgraph = getReplacements().getIntrinsicGraph(javamethod, CompilationIdentifier.INVALID_COMPILATION_ID, getDebugContext());
assertInGraph(testgraph, intrinsicClass);
assert javamethod != null;
--- a/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/Commands.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/Commands.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1128,6 +1128,22 @@
}
}
+ void commandDbgTrace(StringTokenizer t) {
+ int traceFlags;
+ if (t.hasMoreTokens()) {
+ String flagStr = t.nextToken();
+ try {
+ traceFlags = Integer.decode(flagStr).intValue();
+ } catch (NumberFormatException nfe) {
+ MessageOutput.println("dbgtrace command value must be an integer:", flagStr);
+ return;
+ }
+ } else {
+ traceFlags = VirtualMachine.TRACE_ALL;
+ }
+ Env.setTraceFlags(traceFlags);
+ }
+
void commandStop(StringTokenizer t) {
String atIn;
byte suspendPolicy = EventRequest.SUSPEND_ALL;
--- a/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/Env.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/Env.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,10 @@
}
}
+ static void setTraceFlags(int flags) {
+ connection.setTraceFlags(flags);
+ }
+
static VMConnection connection() {
return connection;
}
--- a/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/TTY.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/TTY.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -304,6 +304,7 @@
{"clear", "y", "n"},
{"connectors", "y", "y"},
{"cont", "n", "n"},
+ {"dbgtrace", "y", "y"},
{"disablegc", "n", "n"},
{"down", "n", "y"},
{"dump", "n", "y"},
@@ -587,6 +588,8 @@
evaluator.commandExclude(t);
} else if (cmd.equals("read")) {
readCommand(t);
+ } else if (cmd.equals("dbgtrace")) {
+ evaluator.commandDbgTrace(t);
} else if (cmd.equals("help") || cmd.equals("?")) {
help();
} else if (cmd.equals("version")) {
--- a/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/TTYResources.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/TTYResources.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,6 +107,7 @@
{"Current thread isnt suspended.", "Current thread isn't suspended."},
{"Current thread not set.", "Current thread not set."},
{"dbgtrace flag value must be an integer:", "dbgtrace flag value must be an integer: {0}"},
+ {"dbgtrace command value must be an integer:", "dbgtrace command value must be an integer: {0}"},
{"Deferring.", "Deferring {0}.\nIt will be set after the class is loaded."},
{"End of stack.", "End of stack."},
{"Error popping frame", "Error popping frame - {0}"},
@@ -411,6 +412,7 @@
"<n> <command> -- repeat command n times\n" +
"# <command> -- discard (no-op)\n" +
"help (or ?) -- list commands\n" +
+ "dbgtrace [flag] -- same as dbgtrace command line option" +
"version -- print version information\n" +
"exit (or quit) -- exit debugger\n" +
"\n" +
--- a/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/VMConnection.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.jdi/share/classes/com/sun/tools/example/debug/tty/VMConnection.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
private final Connector connector;
private final Map<String, com.sun.jdi.connect.Connector.Argument> connectorArgs;
- private final int traceFlags;
+ private int traceFlags;
synchronized void notifyOutputComplete() {
outputCompleteCount++;
@@ -321,6 +321,17 @@
this.traceFlags = traceFlags;
}
+ public void setTraceFlags(int flags) {
+ this.traceFlags = flags;
+ /*
+ * If the VM is not connected yet, then vm.setDebugTraceMode() will
+ * be called once the connection is established.
+ */
+ if (vm != null) {
+ vm.setDebugTraceMode(flags);
+ }
+ }
+
synchronized VirtualMachine open() {
if (connector instanceof LaunchingConnector) {
vm = launchTarget();
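Taken together, the jdb changes above add a dbgtrace command whose integer argument flows through Env.setTraceFlags and VMConnection.setTraceFlags to VirtualMachine.setDebugTraceMode on the attached VM. The argument is a mask of the standard JDI trace constants, OR-ed together just as with the existing -dbgtrace command-line option; a rough illustration follows, assuming vm is an attached com.sun.jdi.VirtualMachine.

    // Trace JDWP packets sent by the debugger plus incoming events (1 | 4 == 5,
    // so `dbgtrace 5` in a jdb session requests the same thing).
    vm.setDebugTraceMode(VirtualMachine.TRACE_SENDS | VirtualMachine.TRACE_EVENTS);

    // Plain `dbgtrace` with no argument corresponds to:
    vm.setDebugTraceMode(VirtualMachine.TRACE_ALL);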
--- a/src/jdk.management.agent/share/classes/jdk/internal/agent/Agent.java Fri Feb 15 17:41:06 2019 -0500
+++ b/src/jdk.management.agent/share/classes/jdk/internal/agent/Agent.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -663,11 +663,7 @@
System.err.print(getText("agent.err.error") + ": " + keyText);
if (params != null && params.length != 0) {
- StringBuffer message = new StringBuffer(params[0]);
- for (int i = 1; i < params.length; i++) {
- message.append(" " + params[i]);
- }
- System.err.println(": " + message);
+ System.err.println(": " + String.join(" ", params));
}
e.printStackTrace();
throw new RuntimeException(e);
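String.join collapses the removed StringBuffer loop into a single call with identical output: the parameters separated by single spaces. A tiny self-contained illustration with made-up parameter values:

    public class JoinSketch {
        public static void main(String[] args) {
            String[] params = { "Port", "7091", "already", "in", "use" };
            // Prints ": Port 7091 already in use", matching the old loop's output.
            System.err.println(": " + String.join(" ", params));
        }
    }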
--- a/test/hotspot/gtest/unittest.hpp Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/gtest/unittest.hpp Sun Feb 17 09:54:08 2019 -0500
@@ -105,7 +105,7 @@
TEST(category, CONCAT(name, _vm_assert)) { \
ASSERT_EXIT(child_ ## category ## _ ## name ## _(), \
::testing::ExitedWithCode(1), \
- "assert failed: " msg); \
+ "^assert failed: " msg); \
} \
\
void test_ ## category ## _ ## name ## _()
--- a/test/hotspot/jtreg/ProblemList-graal.txt Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/ProblemList-graal.txt Sun Feb 17 09:54:08 2019 -0500
@@ -226,8 +226,4 @@
org.graalvm.compiler.hotspot.test.ReservedStackAccessTest 8213567 windows-all
-org.graalvm.compiler.replacements.test.StringCompressInflateTest 8214947
-
-org.graalvm.compiler.hotspot.test.BigIntegerIntrinsicsTest 8217289
-
org.graalvm.compiler.hotspot.test.CheckGraalIntrinsics 8218698
--- a/test/hotspot/jtreg/ProblemList.txt Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/ProblemList.txt Sun Feb 17 09:54:08 2019 -0500
@@ -82,7 +82,6 @@
runtime/handshake/HandshakeWalkSuspendExitTest.java 8214174 generic-all
runtime/SharedArchiveFile/SASymbolTableTest.java 8193639 solaris-all
-runtime/CompressedOops/UseCompressedOops.java 8079353 windows-all
#############################################################################
--- a/test/hotspot/jtreg/compiler/cha/StrengthReduceInterfaceCall.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/compiler/cha/StrengthReduceInterfaceCall.java Sun Feb 17 09:54:08 2019 -0500
@@ -53,6 +53,7 @@
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.vm.annotation.DontInline;
import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
import java.io.IOException;
import java.lang.annotation.Retention;
@@ -695,10 +696,6 @@
public static final Unsafe U = Unsafe.getUnsafe();
interface Test<T> {
- boolean isCompiled();
- void assertNotCompiled();
- void assertCompiled();
-
void call(T o);
T receiver(int id);
@@ -733,14 +730,6 @@
};
}
- default void compile(Runnable r) {
- assertNotCompiled();
- while(!isCompiled()) {
- r.run();
- }
- assertCompiled();
- }
-
default void initialize(Class<?>... cs) {
for (Class<?> c : cs) {
U.ensureClassInitialized(c);
@@ -789,14 +778,31 @@
}));
}
- @Override
- public boolean isCompiled() { return WB.isMethodCompiled(TEST); }
+
+ public void compile(Runnable r) {
+ while (!WB.isMethodCompiled(TEST)) {
+ for (int i = 0; i < 100; i++) {
+ r.run();
+ }
+ }
+ assertCompiled(); // record nmethod info
+ }
+
+ private NMethod prevNM = null;
- @Override
- public void assertNotCompiled() { assertFalse(isCompiled()); }
+ public void assertNotCompiled() {
+ NMethod curNM = NMethod.get(TEST, false);
+ assertTrue(prevNM != null); // was previously compiled
+ assertTrue(curNM == null || prevNM.compile_id != curNM.compile_id); // either no nmethod present or recompiled
+ prevNM = curNM; // update nmethod info
+ }
- @Override
- public void assertCompiled() { assertTrue(isCompiled()); }
+ public void assertCompiled() {
+ NMethod curNM = NMethod.get(TEST, false);
+ assertTrue(curNM != null); // nmethod is present
+ assertTrue(prevNM == null || prevNM.compile_id == curNM.compile_id); // no recompilations if nmethod present
+ prevNM = curNM; // update nmethod info
+ }
@Override
public void call(T i) {
--- a/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,15 +156,15 @@
static void testAccess(Object base, long offset) {
// Plain
{
- UNSAFE.put$Type$(base, offset, $value1$);
- $type$ x = UNSAFE.get$Type$(base, offset);
+ UNSAFE.put$MethodAffix$(base, offset, $value1$);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "set $type$ value");
}
// Volatile
{
- UNSAFE.put$Type$Volatile(base, offset, $value2$);
- $type$ x = UNSAFE.get$Type$Volatile(base, offset);
+ UNSAFE.put$MethodAffix$Volatile(base, offset, $value2$);
+ $type$ x = UNSAFE.get$MethodAffix$Volatile(base, offset);
assertEquals(x, $value2$, "putVolatile $type$ value");
}
@@ -172,8 +172,8 @@
#if[Ordered]
// Lazy
{
- UNSAFE.putOrdered$Type$(base, offset, $value1$);
- $type$ x = UNSAFE.get$Type$Volatile(base, offset);
+ UNSAFE.putOrdered$MethodAffix$(base, offset, $value1$);
+ $type$ x = UNSAFE.get$MethodAffix$Volatile(base, offset);
assertEquals(x, $value1$, "putRelease $type$ value");
}
#end[Ordered]
@@ -182,15 +182,15 @@
#if[JdkInternalMisc]
// Lazy
{
- UNSAFE.put$Type$Release(base, offset, $value1$);
- $type$ x = UNSAFE.get$Type$Acquire(base, offset);
+ UNSAFE.put$MethodAffix$Release(base, offset, $value1$);
+ $type$ x = UNSAFE.get$MethodAffix$Acquire(base, offset);
assertEquals(x, $value1$, "putRelease $type$ value");
}
// Opaque
{
- UNSAFE.put$Type$Opaque(base, offset, $value2$);
- $type$ x = UNSAFE.get$Type$Opaque(base, offset);
+ UNSAFE.put$MethodAffix$Opaque(base, offset, $value2$);
+ $type$ x = UNSAFE.get$MethodAffix$Opaque(base, offset);
assertEquals(x, $value2$, "putOpaque $type$ value");
}
#end[JdkInternalMisc]
@@ -199,38 +199,38 @@
#if[Unaligned]
// Unaligned
{
- UNSAFE.put$Type$Unaligned(base, offset, $value2$);
- $type$ x = UNSAFE.get$Type$Unaligned(base, offset);
+ UNSAFE.put$MethodAffix$Unaligned(base, offset, $value2$);
+ $type$ x = UNSAFE.get$MethodAffix$Unaligned(base, offset);
assertEquals(x, $value2$, "putUnaligned $type$ value");
}
{
- UNSAFE.put$Type$Unaligned(base, offset, $value1$, true);
- $type$ x = UNSAFE.get$Type$Unaligned(base, offset, true);
+ UNSAFE.put$MethodAffix$Unaligned(base, offset, $value1$, true);
+ $type$ x = UNSAFE.get$MethodAffix$Unaligned(base, offset, true);
assertEquals(x, $value1$, "putUnaligned big endian $type$ value");
}
{
- UNSAFE.put$Type$Unaligned(base, offset, $value2$, false);
- $type$ x = UNSAFE.get$Type$Unaligned(base, offset, false);
+ UNSAFE.put$MethodAffix$Unaligned(base, offset, $value2$, false);
+ $type$ x = UNSAFE.get$MethodAffix$Unaligned(base, offset, false);
assertEquals(x, $value2$, "putUnaligned little endian $type$ value");
}
#end[Unaligned]
#end[JdkInternalMisc]
#if[CAS]
- UNSAFE.put$Type$(base, offset, $value1$);
+ UNSAFE.put$MethodAffix$(base, offset, $value1$);
// Compare
{
#if[JdkInternalMisc]
- boolean r = UNSAFE.compareAndSet$Type$(base, offset, $value1$, $value2$);
+ boolean r = UNSAFE.compareAndSet$MethodAffix$(base, offset, $value1$, $value2$);
assertEquals(r, true, "success compareAndSet $type$");
#else[JdkInternalMisc]
- boolean r = UNSAFE.compareAndSwap$Type$(base, offset, $value1$, $value2$);
+ boolean r = UNSAFE.compareAndSwap$MethodAffix$(base, offset, $value1$, $value2$);
assertEquals(r, true, "success compareAndSwap $type$");
#end[JdkInternalMisc]
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
#if[JdkInternalMisc]
assertEquals(x, $value2$, "success compareAndSet $type$ value");
#else[JdkInternalMisc]
@@ -240,13 +240,13 @@
{
#if[JdkInternalMisc]
- boolean r = UNSAFE.compareAndSet$Type$(base, offset, $value1$, $value3$);
+ boolean r = UNSAFE.compareAndSet$MethodAffix$(base, offset, $value1$, $value3$);
assertEquals(r, false, "failing compareAndSet $type$");
#else[JdkInternalMisc]
- boolean r = UNSAFE.compareAndSwap$Type$(base, offset, $value1$, $value3$);
+ boolean r = UNSAFE.compareAndSwap$MethodAffix$(base, offset, $value1$, $value3$);
assertEquals(r, false, "failing compareAndSwap $type$");
#end[JdkInternalMisc]
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
#if[JdkInternalMisc]
assertEquals(x, $value2$, "failing compareAndSet $type$ value");
#else[JdkInternalMisc]
@@ -257,107 +257,107 @@
#if[JdkInternalMisc]
// Advanced compare
{
- $type$ r = UNSAFE.compareAndExchange$Type$(base, offset, $value2$, $value1$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$(base, offset, $value2$, $value1$);
assertEquals(r, $value2$, "success compareAndExchange $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "success compareAndExchange $type$ value");
}
{
- $type$ r = UNSAFE.compareAndExchange$Type$(base, offset, $value2$, $value3$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$(base, offset, $value2$, $value3$);
assertEquals(r, $value1$, "failing compareAndExchange $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "failing compareAndExchange $type$ value");
}
{
- $type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value2$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$Acquire(base, offset, $value1$, $value2$);
assertEquals(r, $value1$, "success compareAndExchangeAcquire $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value2$, "success compareAndExchangeAcquire $type$ value");
}
{
- $type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value3$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$Acquire(base, offset, $value1$, $value3$);
assertEquals(r, $value2$, "failing compareAndExchangeAcquire $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value2$, "failing compareAndExchangeAcquire $type$ value");
}
{
- $type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value1$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$Release(base, offset, $value2$, $value1$);
assertEquals(r, $value2$, "success compareAndExchangeRelease $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "success compareAndExchangeRelease $type$ value");
}
{
- $type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value3$);
+ $type$ r = UNSAFE.compareAndExchange$MethodAffix$Release(base, offset, $value2$, $value3$);
assertEquals(r, $value1$, "failing compareAndExchangeRelease $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "failing compareAndExchangeRelease $type$ value");
}
{
boolean success = false;
for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
- success = UNSAFE.weakCompareAndSet$Type$Plain(base, offset, $value1$, $value2$);
+ success = UNSAFE.weakCompareAndSet$MethodAffix$Plain(base, offset, $value1$, $value2$);
}
assertEquals(success, true, "weakCompareAndSetPlain $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value2$, "weakCompareAndSetPlain $type$ value");
}
{
boolean success = false;
for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
- success = UNSAFE.weakCompareAndSet$Type$Acquire(base, offset, $value2$, $value1$);
+ success = UNSAFE.weakCompareAndSet$MethodAffix$Acquire(base, offset, $value2$, $value1$);
}
assertEquals(success, true, "weakCompareAndSetAcquire $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "weakCompareAndSetAcquire $type$");
}
{
boolean success = false;
for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
- success = UNSAFE.weakCompareAndSet$Type$Release(base, offset, $value1$, $value2$);
+ success = UNSAFE.weakCompareAndSet$MethodAffix$Release(base, offset, $value1$, $value2$);
}
assertEquals(success, true, "weakCompareAndSetRelease $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value2$, "weakCompareAndSetRelease $type$");
}
{
boolean success = false;
for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
- success = UNSAFE.weakCompareAndSet$Type$(base, offset, $value2$, $value1$);
+ success = UNSAFE.weakCompareAndSet$MethodAffix$(base, offset, $value2$, $value1$);
}
assertEquals(success, true, "weakCompareAndSet $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "weakCompareAndSet $type$");
}
#end[JdkInternalMisc]
- UNSAFE.put$Type$(base, offset, $value2$);
+ UNSAFE.put$MethodAffix$(base, offset, $value2$);
// Compare set and get
{
- $type$ o = UNSAFE.getAndSet$Type$(base, offset, $value1$);
+ $type$ o = UNSAFE.getAndSet$MethodAffix$(base, offset, $value1$);
assertEquals(o, $value2$, "getAndSet $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, $value1$, "getAndSet $type$ value");
}
#end[CAS]
#if[AtomicAdd]
- UNSAFE.put$Type$(base, offset, $value1$);
+ UNSAFE.put$MethodAffix$(base, offset, $value1$);
// get and add, add and get
{
- $type$ o = UNSAFE.getAndAdd$Type$(base, offset, $value2$);
+ $type$ o = UNSAFE.getAndAdd$MethodAffix$(base, offset, $value2$);
assertEquals(o, $value1$, "getAndAdd $type$");
- $type$ x = UNSAFE.get$Type$(base, offset);
+ $type$ x = UNSAFE.get$MethodAffix$(base, offset);
assertEquals(x, ($type$)($value1$ + $value2$), "getAndAdd $type$");
}
#end[AtomicAdd]
@@ -368,8 +368,8 @@
static void testAccess(long address) {
// Plain
{
- UNSAFE.put$Type$(address, $value1$);
- $type$ x = UNSAFE.get$Type$(address);
+ UNSAFE.put$MethodAffix$(address, $value1$);
+ $type$ x = UNSAFE.get$MethodAffix$(address);
assertEquals(x, $value1$, "set $type$ value");
}
}
--- a/test/hotspot/jtreg/compiler/unsafe/generate-unsafe-access-tests.sh Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/compiler/unsafe/generate-unsafe-access-tests.sh Sun Feb 17 09:54:08 2019 -0500
@@ -1,7 +1,7 @@
#!/bin/bash
#
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
# questions.
#
-javac -d . ../../../../jdk/make/src/classes/build/tools/spp/Spp.java
+javac -d . ../../../../../make/jdk/src/classes/build/tools/spp/Spp.java
SPP=build.tools.spp.Spp
@@ -41,6 +41,12 @@
Type="$(tr '[:lower:]' '[:upper:]' <<< ${type:0:1})${type:1}"
args="-K$type -Dtype=$type -DType=$Type"
+ if [ "$Type" == "Object" -a "$package" == "jdk.internal.misc" ]; then
+ args="$args -DMethodAffix=Reference"
+ else
+ args="$args -DMethodAffix=$Type"
+ fi
+
case $type in
Object|int|long)
args="$args -KCAS -KOrdered"
@@ -123,8 +129,10 @@
args="$args -Dvalue1=$value1 -Dvalue2=$value2 -Dvalue3=$value3"
echo $args
+ out=${Qualifier}UnsafeAccessTest${Type}.java
+ rm -rf "$out"
java $SPP -nel -K$Qualifier -Dpackage=$package -DQualifier=$Qualifier -Dmodule=$module \
- $args < X-UnsafeAccessTest.java.template > ${Qualifier}UnsafeAccessTest${Type}.java
+ $args -iX-UnsafeAccessTest.java.template -o$out
done
}
--- a/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,9 @@
public static void main(String[] args) throws Exception {
testCompressedOopsModesGCs();
- testCompressedOopsModesGCs("-XX:+UseLargePages");
+ if (!Platform.isOSX() && !Platform.isAix()) {
+ testCompressedOopsModesGCs("-XX:+UseLargePages");
+ }
}
public static void testCompressedOopsModesGCs(String... flags) throws Exception {
@@ -73,7 +75,7 @@
Collections.addAll(args, flags2);
if (Platform.is64bit()) {
- // Explicitly turn of compressed oops
+ // Explicitly turn off compressed oops
testCompressedOops(args, "-XX:-UseCompressedOops", "-Xmx32m")
.shouldNotContain("Compressed Oops")
.shouldHaveExitValue(0);
@@ -88,11 +90,13 @@
.shouldContain("Compressed Oops mode")
.shouldHaveExitValue(0);
- // Skip the following three test cases if we're on OSX or Solaris.
+ // Skip the following seven test cases if we're on OSX, Windows, or Solaris.
//
- // OSX doesn't seem to care about HeapBaseMinAddress and Solaris
- // puts the heap way up, forcing different behaviour.
- if (!Platform.isOSX() && !Platform.isSolaris()) {
+ // OSX doesn't seem to care about HeapBaseMinAddress. Windows memory
+ // locations are affected by ASLR. Solaris puts the heap way up,
+ // forcing different behaviour.
+ if (!Platform.isOSX() && !Platform.isWindows() && !Platform.isSolaris()) {
+
// Larger than 4gb heap should result in zero based with shift 3
testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx5g")
.shouldContain("Zero based")
@@ -179,8 +183,8 @@
private static OutputAnalyzer testCompressedOops(ArrayList<String> flags1, String... flags2) throws Exception {
ArrayList<String> args = new ArrayList<>();
- // Always run with these three:
- args.add("-XX:+PrintCompressedOopsMode");
+ // Always run with these two:
+ args.add("-Xlog:gc+heap+coops=trace");
args.add("-Xms32m");
// Add the extra flags
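The test now drives its checks off unified logging (-Xlog:gc+heap+coops=trace) instead of the removed -XX:+PrintCompressedOopsMode flag. A minimal sketch of launching a child VM with that flag and checking for the coops banner, assuming the jtreg test library (jdk.test.lib) is on the class path and the VM is 64-bit with compressed oops enabled:

    import jdk.test.lib.process.OutputAnalyzer;
    import jdk.test.lib.process.ProcessTools;

    public class CoopsLogSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-Xlog:gc+heap+coops=trace", "-Xms32m", "-version");
            new OutputAnalyzer(pb.start())
                    .shouldContain("Compressed Oops mode")
                    .shouldHaveExitValue(0);
        }
    }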
--- a/test/hotspot/jtreg/runtime/classFileParserBug/TestBadClassName.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/runtime/classFileParserBug/TestBadClassName.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8158297
+ * @bug 8158297 8218939
* @summary Constant pool utf8 entry for class name cannot have empty qualified name '//'
* @compile p1/BadInterface1.jcod
* @compile p1/BadInterface2.jcod
@@ -37,20 +37,32 @@
System.out.println("Regression test for bug 8042660");
- // Test class name with p1//BadInterface2
+ // Test class name with p1//BadInterface1
+ String expected = "Illegal class name \"p1//BadInterface1\" in class file UseBadInterface1";
try {
Class newClass = Class.forName("UseBadInterface1");
throw new RuntimeException("Expected ClassFormatError exception not thrown");
} catch (java.lang.ClassFormatError e) {
+ check(e, expected);
System.out.println("Test UseBadInterface1 passed test case with illegal class name");
}
// Test class name with p1/BadInterface2/
+ expected = "Illegal class name \"p1/BadInterface2/\" in class file UseBadInterface2";
try {
Class newClass = Class.forName("UseBadInterface2");
throw new RuntimeException("Expected ClassFormatError exception not thrown");
} catch (java.lang.ClassFormatError e) {
- System.out.println("Test UseBadInterface1 passed test case with illegal class name");
+ check(e, expected);
+ System.out.println("Test UseBadInterface2 passed test case with illegal class name");
+ }
+ }
+
+ static void check(ClassFormatError c, String expected) {
+ if (!c.getMessage().equals(expected)) {
+ throw new RuntimeException("Wrong ClassFormatError - expected: \"" +
+ expected + "\", got \"" +
+ c.getMessage() + "\"");
}
}
}
--- a/test/hotspot/jtreg/runtime/containers/docker/TestCPUAwareness.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/runtime/containers/docker/TestCPUAwareness.java Sun Feb 17 09:54:08 2019 -0500
@@ -175,7 +175,7 @@
System.out.println("cpuset = " + cpuset);
System.out.println("quota = " + quota);
System.out.println("period = " + period);
- System.out.println("shares = " + period);
+ System.out.println("shares = " + shares);
System.out.println("usePreferContainerQuotaForCPUCount = " + usePreferContainerQuotaForCPUCount);
System.out.println("expectedAPC = " + expectedAPC);
--- a/test/hotspot/jtreg/runtime/logging/SafepointTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/runtime/logging/SafepointTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,10 +40,9 @@
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:safepoint=trace",
InnerClass.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldContain("Safepoint synchronization initiated. (");
+ output.shouldContain("Safepoint synchronization initiated");
output.shouldContain("Entering safepoint region: ");
output.shouldContain("Leaving safepoint region");
- output.shouldContain("_at_poll_safepoint");
output.shouldHaveExitValue(0);
}
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ClassType/invokeMethod/invokemethod008.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ClassType/invokeMethod/invokemethod008.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -192,11 +192,11 @@
try {
retValue = testedClass.invokeMethod(thread, method, params, 0);
if ( ((PrimitiveValue )retValue).intValue() == Consts.TEST_FAILED ) {
- complain("VMDisconnectException is not thrown");
+ complain("VMDisconnectedException is not thrown");
exitStatus = Consts.TEST_FAILED;
}
} catch(VMDisconnectedException e) {
- display("!!!expected VMDisconnectException");
+ display("!!!expected VMDisconnectedException");
notifyVMDisconnect();
} catch(Exception e) {
complain("Unexpected " + e);
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ClassType/newInstance/newinstance008.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ClassType/newInstance/newinstance008.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -203,11 +203,11 @@
try {
retValue = testedClass.newInstance(thread, method, params, 0);
if ( ((PrimitiveValue )retValue).intValue() == Consts.TEST_FAILED ) {
- complain("VMDisconnectException is not thrown");
+ complain("VMDisconnectedException is not thrown");
exitStatus = Consts.TEST_FAILED;
}
} catch(VMDisconnectedException e) {
- display("!!!expected VMDisconnectException");
+ display("!!!expected VMDisconnectedException");
notifyVMDisconnect();
} catch(Exception e) {
complain("Unexpected " + e);
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ThreadReference/popFrames/popframes004.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ThreadReference/popFrames/popframes004.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -343,7 +343,7 @@
{ // to get mainThread suspended; otherwise its end results in
// no suspention for breakpointRequest2 (bug or not?), end of test,
- // and VMDisconnectException
+ // and VMDisconnectedException
thread2Ref.suspend();
log2("......eventSet.resume();");
eventSet.resume();
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ThreadReference/popFrames/popframes005.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ThreadReference/popFrames/popframes005.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -346,7 +346,7 @@
{ // to get mainThread suspended; otherwise its end results in
// no suspention for breakpointRequest2 (bug or not?), end of test,
- // and VMDisconnectException
+ // and VMDisconnectedException
thread2Ref.suspend();
log2("......eventSet.resume();");
eventSet.resume();
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/VirtualMachine/dispose/dispose005.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/VirtualMachine/dispose/dispose005.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
* The debugger :
* clears interruption status, <BR>
* invokes vm.dispose() that results in <BR>
- * VMDisconnectException in the thread2 <BR>
+ * VMDisconnectedException in the thread2 <BR>
* which has been suspended after invoking "runt2" <BR>
* but after exception it is resumed and sends interruption<BR>
* to the main thread; <BR>
--- a/test/hotspot/jtreg/vmTestbase/vm/mlvm/anonloader/share/StressClassLoadingTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/vm/mlvm/anonloader/share/StressClassLoadingTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -169,7 +169,7 @@
c = CustomClassLoaders.makeClassBytesLoader(classBytes, className)
.loadClass(className);
}
- c.newInstance();
+ UnsafeAccess.unsafe.ensureClassInitialized(c);
} catch (Throwable e) {
Env.traceVerbose(e, "parser caught exception");
}
--- a/test/hotspot/jtreg/vmTestbase/vm/mlvm/share/mlvmJvmtiUtils.cpp Fri Feb 15 17:41:06 2019 -0500
+++ b/test/hotspot/jtreg/vmTestbase/vm/mlvm/share/mlvmJvmtiUtils.cpp Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,23 @@
pEnv->ReleaseStringUTFChars(src, pStr);
}
+
+/**
+ * Helper class to track JVMTI resources, deallocating the resource in the destructor.
+ */
+class JvmtiResource {
+private:
+ jvmtiEnv* const _jvmtiEnv;
+ void* const _ptr;
+
+public:
+ JvmtiResource(jvmtiEnv* jvmtiEnv, void* ptr) : _jvmtiEnv(jvmtiEnv), _ptr(ptr) { }
+
+ ~JvmtiResource() {
+ NSK_JVMTI_VERIFY(_jvmtiEnv->Deallocate((unsigned char*)_ptr));
+ }
+};
+
struct MethodName * getMethodName(jvmtiEnv * pJvmtiEnv, jmethodID method) {
char * szName;
char * szSignature;
@@ -57,22 +74,31 @@
return NULL;
}
+ JvmtiResource szNameResource(pJvmtiEnv, szName);
+
if (!NSK_JVMTI_VERIFY(pJvmtiEnv->GetMethodDeclaringClass(method, &clazz))) {
- NSK_JVMTI_VERIFY(pJvmtiEnv->Deallocate((unsigned char*) szName));
return NULL;
}
if (!NSK_JVMTI_VERIFY(pJvmtiEnv->GetClassSignature(clazz, &szSignature, NULL))) {
- NSK_JVMTI_VERIFY(pJvmtiEnv->Deallocate((unsigned char*) szName));
return NULL;
}
+ JvmtiResource szSignatureResource(pJvmtiEnv, szSignature);
+
+ if (strlen(szName) + 1 > sizeof(mn->methodName) ||
+ strlen(szSignature) + 1 > sizeof(mn->classSig)) {
+ return NULL;
+ }
+
mn = (MethodName*) malloc(sizeof(MethodNameStruct));
+ if (mn == NULL) {
+ return NULL;
+ }
+
strncpy(mn->methodName, szName, sizeof(mn->methodName));
strncpy(mn->classSig, szSignature, sizeof(mn->classSig));
- NSK_JVMTI_VERIFY(pJvmtiEnv->Deallocate((unsigned char*) szName));
- NSK_JVMTI_VERIFY(pJvmtiEnv->Deallocate((unsigned char*) szSignature));
return mn;
}
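JvmtiResource is a small RAII helper: Deallocate moves into the destructor, so every early return in getMethodName now frees szName and szSignature automatically instead of repeating the cleanup at each exit point. For readers more familiar with Java, the closest analogue of the idiom is try-with-resources over an AutoCloseable; the sketch below is purely illustrative and all names in it are invented.

    final class ScopedResource implements AutoCloseable {
        private final long handle;
        ScopedResource(long handle) { this.handle = handle; }
        long handle() { return handle; }
        @Override public void close() {
            // release `handle` here; runs on every exit path of the try block below
        }
    }

    class ScopedResourceDemo {
        static void use(long h) {
            try (ScopedResource r = new ScopedResource(h)) {
                if (r.handle() == 0) {
                    return;              // close() still runs, like the C++ destructor
                }
                // ... work with r.handle() ...
            }
        }
    }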
--- a/test/jdk/com/sun/jdi/BasicJDWPConnectionTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/com/sun/jdi/BasicJDWPConnectionTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -33,9 +33,11 @@
import java.net.Socket;
import java.net.SocketException;
+import jdk.test.lib.Utils;
import jdk.test.lib.apps.LingeredApp;
import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -72,13 +74,27 @@
}
private static Pattern listenRegexp = Pattern.compile("Listening for transport \\b(.+)\\b at address: \\b(\\d+)\\b");
- private static int detectPort(String s) {
- Matcher m = listenRegexp.matcher(s);
- if (!m.find()) {
- throw new RuntimeException("Could not detect port from '" + s + "'");
+ private static int detectPort(LingeredApp app) {
+ long maxWaitTime = System.currentTimeMillis()
+ + Utils.adjustTimeout(10000); // 10 seconds adjusted for TIMEOUT_FACTOR
+ while (true) {
+ String s = app.getProcessStdout();
+ Matcher m = listenRegexp.matcher(s);
+ if (m.find()) {
+ // m.group(1) is transport, m.group(2) is port
+ return Integer.parseInt(m.group(2));
+ }
+ if (System.currentTimeMillis() > maxWaitTime) {
+ throw new RuntimeException("Could not detect port from '" + s + "' (timeout)");
+ }
+ try {
+ if (app.getProcess().waitFor(500, TimeUnit.MILLISECONDS)) {
+ throw new RuntimeException("Could not detect port from '" + s + "' (debuggee is terminated)");
+ }
+ } catch (InterruptedException e) {
+ // ignore
+ }
}
- // m.group(1) is transport, m.group(2) is port
- return Integer.parseInt(m.group(2));
}
public static void positiveTest(String testName, String allowOpt)
@@ -89,7 +105,7 @@
LingeredApp a = LingeredApp.startApp(cmd);
int res;
try {
- res = handshake(detectPort(a.getProcessStdout()));
+ res = handshake(detectPort(a));
} finally {
a.stopApp();
}
@@ -107,7 +123,7 @@
LingeredApp a = LingeredApp.startApp(cmd);
int res;
try {
- res = handshake(detectPort(a.getProcessStdout()));
+ res = handshake(detectPort(a));
} finally {
a.stopApp();
}
--- a/test/jdk/com/sun/jdi/RepStep.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/com/sun/jdi/RepStep.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
* @summary RepStep detects missed step events due to lack of
* frame pop events (in back-end).
* @author Robert Field
+ * @library /test/lib
*
* @run compile -g RepStepTarg.java
* @run build VMConnection RepStep
@@ -37,6 +38,7 @@
import com.sun.jdi.event.*;
import com.sun.jdi.request.*;
import com.sun.jdi.connect.*;
+import jdk.test.lib.process.StreamPumper;
import java.util.*;
@@ -90,6 +92,7 @@
EventSet set = queue.remove();
for (EventIterator it = set.eventIterator(); it.hasNext(); ) {
Event event = it.nextEvent();
+ System.out.println("event: " + String.valueOf(event));
if (event instanceof VMStartEvent) {
// get thread for setting step later
thread = ((VMStartEvent)event).thread();
@@ -165,6 +168,23 @@
optionsArg.setValue(VMConnection.getDebuggeeVMOptions());
vm = launcher.launch(connectorArgs);
+ // redirect stdout/stderr
+ new StreamPumper(vm.process().getInputStream())
+ .addPump(new StreamPumper.LinePump() {
+ @Override
+ protected void processLine(String line) {
+ System.out.println("[debugee_stdout] " + line);
+ }
+ })
+ .process();
+ new StreamPumper(vm.process().getErrorStream())
+ .addPump(new StreamPumper.LinePump() {
+ @Override
+ protected void processLine(String line) {
+ System.err.println("[debugee_stderr] " + line);
+ }
+ })
+ .process();
System.out.println("launched: " + TARGET);
}
--- a/test/jdk/java/util/Collection/IteratorMicroBenchmark.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/java/util/Collection/IteratorMicroBenchmark.java Sun Feb 17 09:54:08 2019 -0500
@@ -69,8 +69,6 @@
* Be patient; this program runs for a very long time.
* For faster runs, restrict execution using command line args.
*
- * This is an interface based version of ArrayList/IteratorMicroBenchmark
- *
* @author Martin Buchholz
*/
public class IteratorMicroBenchmark {
@@ -115,7 +113,9 @@
CountDownLatch finalized = new CountDownLatch(1);
ReferenceQueue<Object> queue = new ReferenceQueue<>();
WeakReference<Object> ref = new WeakReference<>(
- new Object() { protected void finalize() { finalized.countDown(); }},
+ new Object() {
+ @SuppressWarnings("deprecation")
+ protected void finalize() { finalized.countDown(); }},
queue);
try {
for (int tries = 3; tries--> 0; ) {
@@ -267,16 +267,22 @@
});
}
- static List<Integer> makeSubList(List<Integer> list) {
+ String goodClassName(Object x) {
+ return goodClassName(x.getClass());
+ }
+
+ static List<Integer> makeSubList(
+ List<Integer> elements,
+ UnaryOperator<List<Integer>> copyConstructor) {
+ final ArrayList<Integer> padded = new ArrayList<>();
final ThreadLocalRandom rnd = ThreadLocalRandom.current();
- int size = list.size();
- if (size <= 2) return list.subList(0, size);
- List<Integer> subList = list.subList(rnd.nextInt(0, 2),
- size - rnd.nextInt(0, 2));
- List<Integer> copy = new ArrayList<>(list);
- subList.clear();
- subList.addAll(copy);
- return subList;
+ final int frontPorch = rnd.nextInt(3);
+ final int backPorch = rnd.nextInt(3);
+ for (int n = frontPorch; n--> 0; ) padded.add(rnd.nextInt());
+ padded.addAll(elements);
+ for (int n = backPorch; n--> 0; ) padded.add(rnd.nextInt());
+ return copyConstructor.apply(padded)
+ .subList(frontPorch, frontPorch + elements.size());
}
void run() throws Throwable {
@@ -297,22 +303,42 @@
abq.add(abq.remove());
}
- ArrayList<Job> jobs = Stream.<Collection<Integer>>of(
- al, ad, abq,
- makeSubList(new ArrayList<>(al)),
+ final Integer[] array = al.toArray(new Integer[0]);
+ final List<Integer> immutableSubList
+ = makeSubList(al, x -> List.of(x.toArray(new Integer[0])));
+
+ Stream<Collection<Integer>> collections = concatStreams(
+ Stream.of(
+ // Lists and their subLists
+ al,
+ makeSubList(al, ArrayList::new),
+ new Vector<>(al),
+ makeSubList(al, Vector::new),
new LinkedList<>(al),
- makeSubList(new LinkedList<>(al)),
+ makeSubList(al, LinkedList::new),
+ new CopyOnWriteArrayList<>(al),
+ makeSubList(al, CopyOnWriteArrayList::new),
+
+ ad,
new PriorityQueue<>(al),
- new Vector<>(al),
- makeSubList(new Vector<>(al)),
- new CopyOnWriteArrayList<>(al),
- makeSubList(new CopyOnWriteArrayList<>(al)),
new ConcurrentLinkedQueue<>(al),
new ConcurrentLinkedDeque<>(al),
+
+ // Blocking Queues
+ abq,
new LinkedBlockingQueue<>(al),
new LinkedBlockingDeque<>(al),
new LinkedTransferQueue<>(al),
- new PriorityBlockingQueue<>(al))
+ new PriorityBlockingQueue<>(al),
+
+ List.of(al.toArray(new Integer[0]))),
+
+ // avoid UnsupportedOperationException in jdk9 and jdk10
+ (goodClassName(immutableSubList).equals("RandomAccessSubList"))
+ ? Stream.empty()
+ : Stream.of(immutableSubList));
+
+ ArrayList<Job> jobs = collections
.flatMap(x -> jobs(x))
.filter(job ->
nameFilter == null || nameFilter.matcher(job.name()).find())
@@ -329,16 +355,29 @@
return Stream.of(streams).flatMap(s -> s);
}
+ boolean isMutable(Collection<Integer> x) {
+ return !(x.getClass().getName().contains("ImmutableCollections$"));
+ }
+
Stream<Job> jobs(Collection<Integer> x) {
+ final String klazz = goodClassName(x);
return concatStreams(
collectionJobs(x),
+ (isMutable(x))
+ ? mutableCollectionJobs(x)
+ : Stream.empty(),
+
(x instanceof Deque)
? dequeJobs((Deque<Integer>)x)
: Stream.empty(),
(x instanceof List)
? listJobs((List<Integer>)x)
+ : Stream.empty(),
+
+ (x instanceof List && isMutable(x))
+ ? mutableListJobs((List<Integer>)x)
: Stream.empty());
}
@@ -350,7 +389,7 @@
}
Stream<Job> collectionJobs(Collection<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " iterate for loop") {
public void work() throws Throwable {
@@ -381,14 +420,6 @@
sum[0] = 0;
x.spliterator().forEachRemaining(n -> sum[0] += n);
check.sum(sum[0]);}}},
- new Job(klazz + " removeIf") {
- public void work() throws Throwable {
- int[] sum = new int[1];
- for (int i = 0; i < iterations; i++) {
- sum[0] = 0;
- if (x.removeIf(n -> { sum[0] += n; return false; }))
- throw new AssertionError();
- check.sum(sum[0]);}}},
new Job(klazz + " contains") {
public void work() throws Throwable {
int[] sum = new int[1];
@@ -407,14 +438,6 @@
if (x.containsAll(sneakyAdderCollection))
throw new AssertionError();
check.sum(sum[0]);}}},
- new Job(klazz + " remove(Object)") {
- public void work() throws Throwable {
- int[] sum = new int[1];
- Object sneakyAdder = sneakyAdder(sum);
- for (int i = 0; i < iterations; i++) {
- sum[0] = 0;
- if (x.remove(sneakyAdder)) throw new AssertionError();
- check.sum(sum[0]);}}},
new Job(klazz + " forEach") {
public void work() throws Throwable {
int[] sum = new int[1];
@@ -498,8 +521,29 @@
check.sum(sum[0]);}}});
}
+ Stream<Job> mutableCollectionJobs(Collection<Integer> x) {
+ final String klazz = goodClassName(x);
+ return Stream.of(
+ new Job(klazz + " removeIf") {
+ public void work() throws Throwable {
+ int[] sum = new int[1];
+ for (int i = 0; i < iterations; i++) {
+ sum[0] = 0;
+ if (x.removeIf(n -> { sum[0] += n; return false; }))
+ throw new AssertionError();
+ check.sum(sum[0]);}}},
+ new Job(klazz + " remove(Object)") {
+ public void work() throws Throwable {
+ int[] sum = new int[1];
+ Object sneakyAdder = sneakyAdder(sum);
+ for (int i = 0; i < iterations; i++) {
+ sum[0] = 0;
+ if (x.remove(sneakyAdder)) throw new AssertionError();
+ check.sum(sum[0]);}}});
+ }
+
Stream<Job> dequeJobs(Deque<Integer> x) {
- String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " descendingIterator() loop") {
public void work() throws Throwable {
@@ -519,7 +563,7 @@
}
Stream<Job> listJobs(List<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " listIterator forward loop") {
public void work() throws Throwable {
@@ -555,15 +599,6 @@
if (x.lastIndexOf(sneakyAdder) != -1)
throw new AssertionError();
check.sum(sum[0]);}}},
- new Job(klazz + " replaceAll") {
- public void work() throws Throwable {
- int[] sum = new int[1];
- UnaryOperator<Integer> sneakyAdder =
- x -> { sum[0] += x; return x; };
- for (int i = 0; i < iterations; i++) {
- sum[0] = 0;
- x.replaceAll(sneakyAdder);
- check.sum(sum[0]);}}},
new Job(klazz + " equals") {
public void work() throws Throwable {
ArrayList<Integer> copy = new ArrayList<>(x);
@@ -577,4 +612,18 @@
if (x.hashCode() != hashCode)
throw new AssertionError();}}});
}
+
+ Stream<Job> mutableListJobs(List<Integer> x) {
+ final String klazz = goodClassName(x);
+ return Stream.of(
+ new Job(klazz + " replaceAll") {
+ public void work() throws Throwable {
+ int[] sum = new int[1];
+ UnaryOperator<Integer> sneakyAdder =
+ x -> { sum[0] += x; return x; };
+ for (int i = 0; i < iterations; i++) {
+ sum[0] = 0;
+ x.replaceAll(sneakyAdder);
+ check.sum(sum[0]);}}});
+ }
}
--- a/test/jdk/java/util/Collection/RemoveMicroBenchmark.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/java/util/Collection/RemoveMicroBenchmark.java Sun Feb 17 09:54:08 2019 -0500
@@ -270,6 +270,10 @@
});
}
+ String goodClassName(Object x) {
+ return goodClassName(x.getClass());
+ }
+
static List<Integer> makeSubList(List<Integer> list) {
final ThreadLocalRandom rnd = ThreadLocalRandom.current();
int size = rnd.nextInt(4);
@@ -369,7 +373,7 @@
}
Stream<Job> collectionJobs(Collection<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " removeIf") {
public void work() throws Throwable {
@@ -422,7 +426,7 @@
}
Stream<Job> iteratorRemoveJobs(Collection<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " Iterator.remove") {
public void work() throws Throwable {
@@ -460,7 +464,7 @@
}
Stream<Job> queueJobs(Queue<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " poll()") {
public void work() throws Throwable {
@@ -474,7 +478,7 @@
}
Stream<Job> dequeJobs(Deque<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " descendingIterator().remove") {
public void work() throws Throwable {
@@ -509,7 +513,7 @@
}
Stream<Job> blockingQueueJobs(BlockingQueue<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " timed poll()") {
public void work() throws Throwable {
@@ -545,7 +549,7 @@
}
Stream<Job> blockingDequeJobs(BlockingDeque<Integer> x) {
- final String klazz = goodClassName(x.getClass());
+ final String klazz = goodClassName(x);
return Stream.of(
new Job(klazz + " timed pollFirst()") {
public void work() throws Throwable {
--- a/test/jdk/java/util/concurrent/CountDownLatch/Basic.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/java/util/concurrent/CountDownLatch/Basic.java Sun Feb 17 09:54:08 2019 -0500
@@ -25,24 +25,27 @@
* @test
* @bug 6332435
* @summary Basic tests for CountDownLatch
+ * @library /test/lib
* @author Seetharam Avadhanam, Martin Buchholz
*/
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-
-interface AwaiterFactory {
- Awaiter getAwaiter();
-}
-
-abstract class Awaiter extends Thread {
- private volatile Throwable result = null;
- protected void result(Throwable result) { this.result = result; }
- public Throwable result() { return this.result; }
-}
+import jdk.test.lib.Utils;
public class Basic {
+ static final long LONG_DELAY_MS = Utils.adjustTimeout(10_000);
+
+ interface AwaiterFactory {
+ Awaiter getAwaiter();
+ }
+
+ abstract static class Awaiter extends Thread {
+ private volatile Throwable result = null;
+ protected void result(Throwable result) { this.result = result; }
+ public Throwable result() { return this.result; }
+ }
private void toTheStartingGate(CountDownLatch gate) {
try {
@@ -78,15 +81,12 @@
catch (Throwable result) { result(result); }}};
}
- private AwaiterFactory awaiterFactories(final CountDownLatch latch,
- final CountDownLatch gate,
- final int i) {
- if (i == 1)
- return new AwaiterFactory() { public Awaiter getAwaiter() {
- return awaiter(latch, gate); }};
+ AwaiterFactory awaiterFactory(CountDownLatch latch, CountDownLatch gate) {
+ return () -> awaiter(latch, gate);
+ }
- return new AwaiterFactory() { public Awaiter getAwaiter() {
- return awaiter(latch, gate, 10000); }};
+ AwaiterFactory timedAwaiterFactory(CountDownLatch latch, CountDownLatch gate) {
+ return () -> awaiter(latch, gate, LONG_DELAY_MS);
}
//----------------------------------------------------------------
@@ -100,8 +100,8 @@
for (int i = 0; i < 3; i++) {
CountDownLatch gate = new CountDownLatch(4);
- AwaiterFactory factory1 = test.awaiterFactories(latch, gate, 1);
- AwaiterFactory factory2 = test.awaiterFactories(latch, gate, 0);
+ AwaiterFactory factory1 = test.awaiterFactory(latch, gate);
+ AwaiterFactory factory2 = test.timedAwaiterFactory(latch, gate);
a[count] = factory1.getAwaiter(); a[count++].start();
a[count] = factory1.getAwaiter(); a[count++].start();
a[count] = factory2.getAwaiter(); a[count++].start();
@@ -129,8 +129,8 @@
for (int i = 0; i < 3; i++) {
CountDownLatch gate = new CountDownLatch(4);
- AwaiterFactory factory1 = test.awaiterFactories(latch, gate, 1);
- AwaiterFactory factory2 = test.awaiterFactories(latch, gate, 0);
+ AwaiterFactory factory1 = test.awaiterFactory(latch, gate);
+ AwaiterFactory factory2 = test.timedAwaiterFactory(latch, gate);
a[count] = factory1.getAwaiter(); a[count++].start();
a[count] = factory1.getAwaiter(); a[count++].start();
a[count] = factory2.getAwaiter(); a[count++].start();
@@ -162,8 +162,8 @@
for (int i = 0; i < 3; i++) {
CountDownLatch gate = new CountDownLatch(4);
- AwaiterFactory factory1 = test.awaiterFactories(latch, gate, 1);
- AwaiterFactory factory2 = test.awaiterFactories(latch, gate, 0);
+ AwaiterFactory factory1 = test.awaiterFactory(latch, gate);
+ AwaiterFactory factory2 = test.timedAwaiterFactory(latch, gate);
a[count] = test.awaiter(latch, gate, timeout[i]); a[count++].start();
a[count] = factory1.getAwaiter(); a[count++].start();
a[count] = factory2.getAwaiter(); a[count++].start();
--- a/test/jdk/java/util/concurrent/tck/CyclicBarrierTest.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/java/util/concurrent/tck/CyclicBarrierTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -38,6 +38,9 @@
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -486,4 +489,34 @@
assertEquals(0, barrier.getNumberWaiting());
}
}
+
+ /**
+ * There can be more threads calling await() than parties, as long as each
+ * task only calls await once and the task count is a multiple of parties.
+ */
+ public void testMoreTasksThanParties() throws Exception {
+ final ThreadLocalRandom rnd = ThreadLocalRandom.current();
+ final int parties = rnd.nextInt(1, 5);
+ final int nTasks = rnd.nextInt(1, 5) * parties;
+ final AtomicInteger tripCount = new AtomicInteger(0);
+ final AtomicInteger awaitCount = new AtomicInteger(0);
+ final CyclicBarrier barrier =
+ new CyclicBarrier(parties, () -> tripCount.getAndIncrement());
+ final ExecutorService e = Executors.newFixedThreadPool(nTasks);
+ final Runnable awaiter = () -> {
+ try {
+ if (ThreadLocalRandom.current().nextBoolean())
+ barrier.await();
+ else
+ barrier.await(LONG_DELAY_MS, MILLISECONDS);
+ awaitCount.getAndIncrement();
+ } catch (Throwable fail) { threadUnexpectedException(fail); }};
+ try (PoolCleaner cleaner = cleaner(e)) {
+ for (int i = nTasks; i--> 0; )
+ e.execute(awaiter);
+ }
+ assertEquals(nTasks / parties, tripCount.get());
+ assertEquals(nTasks, awaitCount.get());
+ assertEquals(0, barrier.getNumberWaiting());
+ }
}
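The new testMoreTasksThanParties case leans on the documented CyclicBarrier behaviour that the barrier action runs once per generation, i.e. once per `parties` arrivals, so nTasks awaits produce nTasks / parties trips. A stripped-down, self-contained sketch of that relationship outside the tck harness (the numbers are arbitrary):

    import java.util.concurrent.CyclicBarrier;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class BarrierTripSketch {
        public static void main(String[] args) throws Exception {
            final int parties = 3, tasks = 9;        // tasks is a multiple of parties
            AtomicInteger trips = new AtomicInteger();
            CyclicBarrier barrier = new CyclicBarrier(parties, trips::incrementAndGet);
            ExecutorService pool = Executors.newFixedThreadPool(tasks);
            for (int i = 0; i < tasks; i++) {
                pool.execute(() -> {
                    try { barrier.await(); } catch (Exception e) { throw new RuntimeException(e); }
                });
            }
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
            System.out.println(trips.get());         // expected: tasks / parties == 3
        }
    }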
--- a/test/jdk/java/util/concurrent/tck/ForkJoinPool9Test.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/jdk/java/util/concurrent/tck/ForkJoinPool9Test.java Sun Feb 17 09:54:08 2019 -0500
@@ -38,6 +38,8 @@
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Stream;
import junit.framework.Test;
import junit.framework.TestSuite;
@@ -67,21 +69,33 @@
.findVarHandle(Thread.class, "contextClassLoader", ClassLoader.class);
ClassLoader systemClassLoader = ClassLoader.getSystemClassLoader();
boolean haveSecurityManager = (System.getSecurityManager() != null);
- CountDownLatch taskStarted = new CountDownLatch(1);
+ CountDownLatch runInCommonPoolStarted = new CountDownLatch(1);
+ ClassLoader classLoaderDistinctFromSystemClassLoader
+ = ClassLoader.getPlatformClassLoader();
+ assertNotSame(classLoaderDistinctFromSystemClassLoader,
+ systemClassLoader);
Runnable runInCommonPool = () -> {
- taskStarted.countDown();
+ runInCommonPoolStarted.countDown();
assertTrue(ForkJoinTask.inForkJoinPool());
- assertSame(ForkJoinPool.commonPool(),
- ForkJoinTask.getPool());
- assertSame(systemClassLoader,
- Thread.currentThread().getContextClassLoader());
- assertSame(systemClassLoader,
- CCL.get(Thread.currentThread()));
+ assertSame(ForkJoinPool.commonPool(), ForkJoinTask.getPool());
+ Thread currentThread = Thread.currentThread();
+
+ Stream.of(systemClassLoader, null).forEach(cl -> {
+ if (ThreadLocalRandom.current().nextBoolean())
+ // should always be permitted, without effect
+ currentThread.setContextClassLoader(cl);
+ });
+
+ Stream.of(currentThread.getContextClassLoader(),
+ (ClassLoader) CCL.get(currentThread))
+ .forEach(cl -> assertTrue(cl == systemClassLoader || cl == null));
+
if (haveSecurityManager)
assertThrows(
SecurityException.class,
() -> System.getProperty("foo"),
- () -> Thread.currentThread().setContextClassLoader(null));
+ () -> currentThread.setContextClassLoader(
+ classLoaderDistinctFromSystemClassLoader));
// TODO ?
// if (haveSecurityManager
// && Thread.currentThread().getClass().getSimpleName()
@@ -91,7 +105,7 @@
Future<?> f = ForkJoinPool.commonPool().submit(runInCommonPool);
// Ensure runInCommonPool is truly running in the common pool,
// by giving this thread no opportunity to "help" on get().
- await(taskStarted);
+ await(runInCommonPoolStarted);
assertNull(f.get());
}
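
The reworked assertions above accept a common-pool worker's context class loader being either the system class loader or null, depending on how the pool's worker threads were created. A small probe in the same spirit (illustrative only, not part of the patch; the class name CommonPoolCclProbe is made up) that prints what a given JDK actually reports:

    import java.util.concurrent.ForkJoinPool;

    public class CommonPoolCclProbe {
        public static void main(String[] args) throws Exception {
            // Run a task in the common pool and print the loaders it observes;
            // the test above accepts either the system class loader or null here.
            ForkJoinPool.commonPool().submit(() -> {
                Thread t = Thread.currentThread();
                System.out.println("worker CCL  = " + t.getContextClassLoader());
                System.out.println("system CL   = " + ClassLoader.getSystemClassLoader());
                System.out.println("platform CL = " + ClassLoader.getPlatformClassLoader());
            }).get();
        }
    }
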
--- a/test/langtools/jdk/javadoc/doclet/testLinkOption/TestLinkOption.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/jdk/javadoc/doclet/testLinkOption/TestLinkOption.java Sun Feb 17 09:54:08 2019 -0500
@@ -69,9 +69,7 @@
"-linkoffline", url, testSrc + "/jdk",
"-package",
"pkg", "mylib.lang");
- checkExit(Exit.ERROR);
- checkOutput(Output.OUT, true,
- "tag not supported in the generated HTML version: tt");
+ checkExit(Exit.OK);
checkOutput("pkg/C.html", true,
"<a href=\"" + url + "java/lang/String.html?is-external=true\" "
@@ -88,14 +86,14 @@
checkOutput("pkg/B.html", true,
"<div class=\"block\">A method with html tag the method "
+ "<a href=\"" + url + "java/lang/ClassLoader.html?is-external=true#getSystemClassLoader()\""
- + " title=\"class or interface in java.lang\" class=\"externalLink\"><code><tt>getSystemClassLoader()</tt>"
+ + " title=\"class or interface in java.lang\" class=\"externalLink\"><code><b>getSystemClassLoader()</b>"
+ "</code></a> as the parent class loader.</div>",
"<div class=\"block\">is equivalent to invoking <code>"
+ "<a href=\"#createTempFile(java.lang.String,java.lang.String,java.io.File)\">"
+ "<code>createTempFile(prefix, suffix, null)</code></a></code>.</div>",
"<a href=\"" + url + "java/lang/String.html?is-external=true\" "
+ "title=\"class or interface in java.lang\" class=\"externalLink\">Link-Plain to String Class</a>",
- "<code><tt>getSystemClassLoader()</tt></code>",
+ "<code><b>getSystemClassLoader()</b></code>",
"<code>createTempFile(prefix, suffix, null)</code>",
"<dd><a href=\"http://www.ietf.org/rfc/rfc2279.txt\"><i>RFC 2279: UTF-8, a\n" +
" transformation format of ISO 10646</i></a>, <br><a " +
--- a/test/langtools/jdk/javadoc/doclet/testLinkOption/pkg/B.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/jdk/javadoc/doclet/testLinkOption/pkg/B.java Sun Feb 17 09:54:08 2019 -0500
@@ -31,7 +31,7 @@
public class B {
/**
* A method with html tag the method {@link ClassLoader#getSystemClassLoader()
- * <tt>getSystemClassLoader()</tt>} as the parent class loader.
+ * <b>getSystemClassLoader()</b>} as the parent class loader.
*/
public void method1() {}
--- a/test/langtools/tools/javac/6304921/TestLog.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/tools/javac/6304921/TestLog.java Sun Feb 17 09:54:08 2019 -0500
@@ -76,7 +76,7 @@
Set<DiagnosticFlag> defaultErrorFlags =
(Set<DiagnosticFlag>) defaultErrorFlagsField.get(diagnosticFactory);
- defaultErrorFlags.add(DiagnosticFlag.MULTIPLE);
+ defaultErrorFlags.add(DiagnosticFlag.API);
JavacFileManager.preRegister(context);
ParserFactory pfac = ParserFactory.instance(context);
--- a/test/langtools/tools/javac/diags/examples.not-yet.txt Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/tools/javac/diags/examples.not-yet.txt Sun Feb 17 09:54:08 2019 -0500
@@ -125,6 +125,7 @@
compiler.misc.bad.const.pool.entry # constant pool entry has wrong type
compiler.warn.access.to.member.from.serializable.lambda # in order to generate it we need to modify a restricted package
compiler.warn.invalid.path # this warning is generated only in Windows systems
+compiler.err.invalid.path # this error is generated only in Windows systems
compiler.note.multiple.elements # needs user code
compiler.err.preview.feature.disabled.classfile # preview feature support: needs compilation against classfile
compiler.warn.preview.feature.use.classfile # preview feature support: needs compilation against classfile
--- a/test/langtools/tools/javac/options/T6986895.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/tools/javac/options/T6986895.java Sun Feb 17 09:54:08 2019 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test
* @bug 6986895
+ * @bug 8201544
* @summary compiler gives misleading message for no input files
* @modules jdk.compiler
*/
@@ -38,6 +39,8 @@
String noSourceFiles = "no source files";
String noSourceFilesOrClasses = "no source files or class names";
+ String invalidFileName = "Invalid filename";
+ boolean isWindows = System.getProperty("os.name").startsWith("Windows");
void run() throws Exception {
Locale prev = Locale.getDefault();
@@ -45,6 +48,8 @@
Locale.setDefault(Locale.ENGLISH);
test(noSourceFiles, "-Werror");
test(noSourceFilesOrClasses, "-Werror", "-Xprint");
+ if (isWindows)
+ test(invalidFileName, "-Werror", "someNonExistingFile*.java");
} finally {
Locale.setDefault(prev);
}
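
The new Windows-only case above expects javac to report "Invalid filename" when handed a wildcard pattern the shell did not expand. A quick way to reproduce that outside the test harness (illustrative only, not part of the patch; WildcardArgDemo is a made-up name) is to drive the compiler through javax.tools:

    import javax.tools.JavaCompiler;
    import javax.tools.ToolProvider;

    public class WildcardArgDemo {
        public static void main(String[] args) {
            JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
            // On Windows an unexpanded wildcard argument should trigger the
            // "Invalid filename" diagnostic; on other platforms javac
            // typically just reports the file as not found.
            int rc = javac.run(null, null, null, "someNonExistingFile*.java");
            System.out.println("javac exit code: " + rc);
        }
    }
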
--- a/test/langtools/tools/javac/processing/6994946/SemanticErrorTest.2.out Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/tools/javac/processing/6994946/SemanticErrorTest.2.out Sun Feb 17 09:54:08 2019 -0500
@@ -1,3 +1,4 @@
+- compiler.err.proc.messager: Deliberate Error
+SemanticErrorTest.java:13:1: compiler.err.proc.messager: Deliberate Error on Trees
SemanticErrorTest.java:13:46: compiler.err.repeated.interface
-- compiler.err.proc.messager: Deliberate Error
-2 errors
+3 errors
--- a/test/langtools/tools/javac/processing/6994946/TestProcessor.java Fri Feb 15 17:41:06 2019 -0500
+++ b/test/langtools/tools/javac/processing/6994946/TestProcessor.java Sun Feb 17 09:54:08 2019 -0500
@@ -27,13 +27,21 @@
import javax.lang.model.element.*;
import static javax.tools.Diagnostic.Kind.*;
+import com.sun.source.util.TreePath;
+import com.sun.source.util.Trees;
+
public class TestProcessor extends JavacTestingAbstractProcessor {
private int round = 0;
public boolean process(Set<? extends TypeElement> annotations,
RoundEnvironment roundEnv) {
- if (++round == 1)
+ if (++round == 1) {
messager.printMessage(ERROR, "Deliberate Error");
+ Trees trees = Trees.instance(processingEnv);
+ TreePath elPath = trees.getPath(roundEnv.getRootElements().iterator().next());
+ trees.printMessage(ERROR, "Deliberate Error on Trees",
+ elPath.getLeaf(), elPath.getCompilationUnit());
+ }
return false;
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/processing/GenerateAndError.java Sun Feb 17 09:54:08 2019 -0500
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8217381
+ * @summary Check that errors are reported conveniently when an annotation
+ *          processor generates a source file and an error in the same round
+ * @library /tools/javac/lib
+ * @modules jdk.compiler
+ * @build JavacTestingAbstractProcessor GenerateAndError
+ * @compile/fail/ref=GenerateAndError.out -XDrawDiagnostics -processor GenerateAndError GenerateAndErrorTest.java
+ */
+
+import java.io.IOException;
+import java.io.Writer;
+import java.util.*;
+
+import javax.annotation.processing.*;
+import javax.lang.model.element.*;
+import javax.tools.Diagnostic.Kind;
+
+public class GenerateAndError extends JavacTestingAbstractProcessor {
+ int round = 0;
+ @Override
+ public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
+ if (round++ == 0) {
+ try (Writer w = processingEnv.getFiler().createSourceFile("Extra").openWriter()) {
+ w.write("public class Extra {}");
+ } catch (IOException ex) {
+ throw new IllegalStateException(ex);
+ }
+ processingEnv.getMessager().printMessage(Kind.ERROR, "error");
+ }
+ return false;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/processing/GenerateAndError.out Sun Feb 17 09:54:08 2019 -0500
@@ -0,0 +1,3 @@
+- compiler.err.proc.messager: error
+GenerateAndErrorTest.java:2:60: compiler.err.cant.resolve: kindname.class, ExtraExtra, ,
+2 errors
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/processing/GenerateAndErrorTest.java Sun Feb 17 09:54:08 2019 -0500
@@ -0,0 +1,2 @@
+/* /nodynamiccopyright/ */
+public class GenerateAndErrorTest extends Extra implements ExtraExtra {}