--- a/.hgtags Mon Aug 19 20:31:10 2019 -0400
+++ b/.hgtags Mon Aug 19 21:14:34 2019 -0400
@@ -581,3 +581,4 @@
c0023e364b6f130cb1e93747b796d8718d544db1 jdk-14+8
9c250a7600e12bdb1e611835250af3204d4aa152 jdk-13+33
18f189e69b29f8215a3500b875127ed4fb2d977a jdk-14+9
+ececb6dae777e622abda42c705fd984a42f46b5a jdk-14+10
--- a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java Mon Aug 19 20:31:10 2019 -0400
+++ b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java Mon Aug 19 21:14:34 2019 -0400
@@ -32,6 +32,7 @@
package build.tools.classlist;
import java.net.InetAddress;
+import java.nio.file.FileSystems;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
@@ -56,6 +57,8 @@
public static void main(String ... args) {
+ FileSystems.getDefault();
+
List<String> strings = Arrays.asList("Hello", "World!", "From: ",
InetAddress.getLoopbackAddress().toString());
--- a/make/lib/Lib-java.base.gmk Mon Aug 19 20:31:10 2019 -0400
+++ b/make/lib/Lib-java.base.gmk Mon Aug 19 21:14:34 2019 -0400
@@ -56,7 +56,7 @@
LIBS_solaris := -lnsl -lsocket $(LIBDL), \
LIBS_aix := $(LIBDL),\
LIBS_windows := ws2_32.lib jvm.lib secur32.lib iphlpapi.lib winhttp.lib \
- urlmon.lib delayimp.lib $(WIN_JAVA_LIB) advapi32.lib, \
+ delayimp.lib $(WIN_JAVA_LIB) advapi32.lib, \
LIBS_macosx := -framework CoreFoundation -framework CoreServices, \
))
--- a/src/hotspot/cpu/aarch64/aarch64.ad Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Mon Aug 19 21:14:34 2019 -0400
@@ -1771,7 +1771,7 @@
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
-
+
int bangsize = C->bang_size_in_bytes();
if (C->need_stack_bang(bangsize) && UseStackBanging)
__ generate_stack_overflow_check(bangsize);
@@ -3508,7 +3508,7 @@
assert_different_registers(oop, box, tmp, disp_hdr);
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
__ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
if (UseBiasedLocking && !UseOptoBiasInlining) {
@@ -3516,17 +3516,17 @@
}
// Check for existing monitor
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
-
- // Set tmp to be (markOop of object | UNLOCK_VALUE).
- __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
+
+ // Set tmp to be (markWord of object | UNLOCK_VALUE).
+ __ orr(tmp, disp_hdr, markWord::unlocked_value);
// Initialize the box. (Must happen before we update the object mark!)
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- // Compare object markOop with an unlocked value (tmp) and if
- // equal exchange the stack address of our box with object markOop.
- // On failure disp_hdr contains the possibly locked markOop.
+ // Compare object markWord with an unlocked value (tmp) and if
+ // equal exchange the stack address of our box with object markWord.
+ // On failure disp_hdr contains the possibly locked markWord.
__ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, disp_hdr);
__ br(Assembler::EQ, cont);
@@ -3540,10 +3540,10 @@
// We did not see an unlocked object so try the fast recursive case.
// Check if the owner is self by comparing the value in the
- // markOop of object (disp_hdr) with the stack pointer.
+ // markWord of object (disp_hdr) with the stack pointer.
__ mov(rscratch1, sp);
__ sub(disp_hdr, disp_hdr, rscratch1);
- __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
+ __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
// If condition is true we are cont and hence we can store 0 as the
// displaced header in the box, which indicates that it is a recursive lock.
__ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
@@ -3558,15 +3558,15 @@
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
- __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
+ __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
__ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, noreg); // Sets flags for result
// Store a non-null value into the box to avoid looking like a re-entrant
// lock. The fast-path monitor unlock code checks for
- // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
+ // markWord::monitor_value so use markWord::unused_mark which has the
// relevant bit set, and also matches ObjectSynchronizer::slow_enter.
- __ mov(tmp, (address)markOopDesc::unused_mark());
+ __ mov(tmp, (address)markWord::unused_mark().value());
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
__ bind(cont);
@@ -3598,10 +3598,10 @@
// Handle existing monitor.
__ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
// Check if it is still a light weight lock, this is true if we
- // see the stack address of the basicLock in the markOop of the
+ // see the stack address of the basicLock in the markWord of the
// object.
__ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
@@ -3612,7 +3612,7 @@
// Handle existing monitor.
__ bind(object_has_monitor);
- __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
+ __ add(tmp, tmp, -markWord::monitor_value); // monitor
__ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -82,7 +82,7 @@
// Load object header
ldr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orr(hdr, hdr, markOopDesc::unlocked_value);
+ orr(hdr, hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
str(hdr, Address(disp_hdr, 0));
// test if object header is still the same (i.e. unlocked), and if so, store the
@@ -176,7 +176,7 @@
ldr(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t
- mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
+ mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
}
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -242,9 +242,9 @@
Label done;
__ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ eon(tmp, tmp, zr);
- __ ands(zr, tmp, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp, markWord::lock_mask_in_place);
__ br(Assembler::NE, done);
- __ orr(tmp, tmp, markOopDesc::marked_value);
+ __ orr(tmp, tmp, markWord::marked_value);
__ eon(dst, tmp, zr);
__ bind(done);
@@ -548,11 +548,11 @@
Label slow_path;
__ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
__ eon(tmp1, tmp1, zr);
- __ ands(zr, tmp1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(tmp1, tmp1, markOopDesc::marked_value);
+ __ orr(tmp1, tmp1, markWord::marked_value);
__ eon(res, tmp1, zr);
__ b(*stub->continuation());
@@ -665,11 +665,11 @@
Label slow_path;
__ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ eon(rscratch1, rscratch1, zr);
- __ ands(zr, rscratch1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, rscratch1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(rscratch1, rscratch1, markOopDesc::marked_value);
+ __ orr(rscratch1, rscratch1, markWord::marked_value);
__ eon(r0, rscratch1, zr);
__ ret(lr);
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -32,8 +32,6 @@
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -472,7 +472,7 @@
counters = BiasedLocking::counters();
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
Address saved_mark_addr(lock_reg, 0);
@@ -489,15 +489,15 @@
null_check_offset = offset();
ldr(swap_reg, mark_addr);
}
- andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::NE, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
load_prototype_header(tmp_reg, obj_reg);
orr(tmp_reg, tmp_reg, rthread);
eor(tmp_reg, swap_reg, tmp_reg);
- andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+ andr(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
Label around;
cbnz(tmp_reg, around);
@@ -520,7 +520,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::biased_lock_mask_in_place);
cbnz(rscratch1, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -532,7 +532,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::epoch_mask_in_place);
cbnz(rscratch1, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -543,7 +543,7 @@
// don't accidentally blow away another thread's valid bias.
{
Label here;
- mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ mov(rscratch1, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
andr(swap_reg, swap_reg, rscratch1);
orr(tmp_reg, swap_reg, rthread);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
@@ -628,8 +628,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(temp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
+ cmp(temp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::EQ, done);
}
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3615,7 +3615,7 @@
if (UseBiasedLocking) {
__ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
} else {
- __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
+ __ mov(rscratch1, (intptr_t)markWord::prototype().value());
}
__ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ store_klass_gap(r0, zr); // zero klass gap for compressed oops
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -92,7 +92,7 @@
if(UseBiasedLocking && !len->is_valid()) {
ldr(tmp, Address(klass, Klass::prototype_header_offset()));
} else {
- mov(tmp, (intptr_t)markOopDesc::prototype());
+ mov(tmp, (intptr_t)markWord::prototype().value());
}
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
@@ -219,7 +219,7 @@
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
str(obj, Address(disp_hdr, obj_offset));
- tst(hdr, markOopDesc::unlocked_value);
+ tst(hdr, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive locking
--- a/src/hotspot/cpu/arm/globals_arm.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/globals_arm.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -30,8 +30,6 @@
// (see globals.hpp)
//
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -878,7 +878,7 @@
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
// Test if object is already locked
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(already_locked, eq);
// Save old object->mark() into BasicLock's displaced header
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1345,7 +1345,7 @@
}
#endif
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
// Biased locking
@@ -1367,8 +1367,8 @@
// On MP platform loads could return 'stale' values in some cases.
// That is acceptable since either CAS or slow case path is taken in the worst case.
- andr(tmp_reg, swap_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(cas_label, ne);
@@ -1379,7 +1379,7 @@
orr(tmp_reg, tmp_reg, Rthread);
eor(tmp_reg, tmp_reg, swap_reg);
- bics(tmp_reg, tmp_reg, ((int) markOopDesc::age_mask_in_place));
+ bics(tmp_reg, tmp_reg, ((int) markWord::age_mask_in_place));
#ifndef PRODUCT
if (counters != NULL) {
@@ -1401,7 +1401,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- tst(tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
b(try_revoke_bias, ne);
// Biasing is still enabled for this data type. See whether the
@@ -1413,7 +1413,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- tst(tmp_reg, (uintx)markOopDesc::epoch_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::epoch_mask_in_place);
b(try_rebias, ne);
// tmp_reg has the age, epoch and pattern bits cleared
@@ -1431,10 +1431,10 @@
// until the assembler can be made smarter, we need to make some assumptions about the values
// so we can optimize this:
- assert((markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
+ assert((markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
mov(swap_reg, AsmOperand(swap_reg, lsl, 23));
- mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markOop with thread bits cleared (for CAS)
+ mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markWord with thread bits cleared (for CAS)
orr(tmp_reg, swap_reg, Rthread); // new mark
@@ -1519,8 +1519,8 @@
// the bias bit would be clear.
ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(done, eq);
}
@@ -1993,7 +1993,7 @@
// Invariant: Rmark loaded below does not contain biased lock pattern
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive lock
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -861,16 +861,16 @@
__ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- assert(markOopDesc::unlocked_value == 1, "adjust this code");
- __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);
+ assert(markWord::unlocked_value == 1, "adjust this code");
+ __ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
if (UseBiasedLocking) {
- assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
- __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
+ assert(is_power_of_2(markWord::biased_lock_bit_in_place), "adjust this code");
+ __ tbnz(Rtemp, exact_log2(markWord::biased_lock_bit_in_place), slow_case);
}
- __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
- __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
+ __ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
+ __ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
__ bx(LR, ne);
__ bind(slow_case);
@@ -1172,7 +1172,7 @@
__ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
__ sub(disp_hdr, FP, lock_slot_fp_offset);
- __ tst(mark, markOopDesc::unlocked_value);
+ __ tst(mark, markWord::unlocked_value);
__ b(fast_lock, ne);
// Check for recursive lock
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -487,19 +487,20 @@
__ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
- Label Condy, exit;
-#ifdef __ABI_HARD__
- Label Long;
// get type from tags
__ add(Rtemp, Rtags, tags_offset);
__ ldrb(Rtemp, Address(Rtemp, Rindex));
+
+ Label Condy, exit;
+#ifdef __ABI_HARD__
+ Label NotDouble;
__ cmp(Rtemp, JVM_CONSTANT_Double);
- __ b(Long, ne);
+ __ b(NotDouble, ne);
__ ldr_double(D0_tos, Address(Rbase, base_offset));
__ push(dtos);
__ b(exit);
- __ bind(Long);
+ __ bind(NotDouble);
#endif
__ cmp(Rtemp, JVM_CONSTANT_Long);
@@ -4045,7 +4046,7 @@
if (UseBiasedLocking) {
__ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
} else {
- __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
+ __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
}
// mark
__ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -110,12 +110,12 @@
}
// ... and mark it unlocked.
- ori(Rmark, Rmark, markOopDesc::unlocked_value);
+ ori(Rmark, Rmark, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
- // Compare object markOop with Rmark and if equal exchange Rscratch with object markOop.
+ // Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/Rscratch,
@@ -137,7 +137,7 @@
bind(cas_failed);
// We did not find an unlocked object so see if this is a recursive case.
sub(Rscratch, Rscratch, R1_SP);
- load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
and_(R0/*==0?*/, Rscratch, R0);
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
bne(CCR0, slow_int);
@@ -171,7 +171,7 @@
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/R0,
/*compare_value=*/Rbox,
@@ -215,7 +215,7 @@
if (UseBiasedLocking && !len->is_valid()) {
ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
} else {
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
std(t1, oopDesc::mark_offset_in_bytes(), obj);
store_klass(obj, klass);
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -32,8 +32,6 @@
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -881,7 +881,7 @@
} else {
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -903,17 +903,17 @@
assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -949,12 +949,12 @@
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, tmp);
// If condition is true we are done and hence we can store 0 in the displaced
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2078,7 +2078,7 @@
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
if (PrintBiasedLockingStatistics) {
@@ -2088,13 +2088,13 @@
stwx(temp_reg, temp2_reg);
}
- andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ andi(temp_reg, mark_reg, markWord::biased_lock_mask_in_place);
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
bne(cr_reg, cas_label);
load_klass(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, R16_thread, temp_reg);
xorr(temp_reg, mark_reg, temp_reg);
@@ -2125,7 +2125,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+ andi(temp2_reg, temp_reg, markWord::biased_lock_mask_in_place);
cmpwi(cr_reg, temp2_reg, 0);
bne(cr_reg, try_revoke_bias);
@@ -2139,10 +2139,10 @@
// otherwise the manipulations it performs on the mark word are
// illegal.
- int shift_amount = 64 - markOopDesc::epoch_shift;
+ int shift_amount = 64 - markWord::epoch_shift;
// rotate epoch bits to right (little) end and set other bits to 0
// [ big part | epoch | little part ] -> [ 0..0 | epoch ]
- rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
+ rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markWord::epoch_bits);
// branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
bne(CCR0, try_rebias);
@@ -2152,9 +2152,9 @@
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
- markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place));
+ andi(mark_reg, mark_reg, (markWord::biased_lock_mask_in_place |
+ markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place));
orr(temp_reg, R16_thread, mark_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2187,7 +2187,7 @@
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
load_klass(temp_reg, obj_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp2_reg, R16_thread, temp2_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, temp2_reg, temp_reg);
@@ -2224,7 +2224,7 @@
// normal locking code.
load_klass(temp_reg, obj_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp_reg, temp_reg, temp2_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2236,7 +2236,7 @@
MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());
- // reload markOop in mark_reg before continuing with lightweight locking
+ // reload markWord in mark_reg before continuing with lightweight locking
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
// Fall through to the normal CAS-based lock, because no matter what
@@ -2264,9 +2264,9 @@
// the bias bit would be clear.
ld(temp_reg, 0, mark_addr);
- andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
-
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ andi(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
+
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
beq(cr_reg, done);
}
@@ -2687,7 +2687,7 @@
load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
- andi_(R0, mark_word, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ andi_(R0, mark_word, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
bne(CCR0, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -2705,10 +2705,10 @@
}
tbegin_();
beq(CCR0, L_on_abort);
- ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
- andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- beq(flag, DONE_LABEL); // all done if unlocked
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
+ andi(R0, mark_word, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ beq(flag, DONE_LABEL); // all done if unlocked
if (UseRTMXendForLockBusy) {
tend_();
@@ -2744,9 +2744,9 @@
assert(UseRTMLocking, "why call this otherwise?");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
// Clean monitor_value bit to get valid pointer.
- int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
-
- // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
+ int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value;
+
+ // Store non-null, using boxReg instead of (intptr_t)markWord::unused_mark().
std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
const Register tmpReg = boxReg;
const Register owner_addr_Reg = mark_word;
@@ -2791,7 +2791,7 @@
// Restore owner_addr_Reg
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
#ifdef ASSERT
- andi_(R0, mark_word, markOopDesc::monitor_value);
+ andi_(R0, mark_word, markWord::monitor_value);
asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
#endif
addi(owner_addr_Reg, mark_word, owner_offset);
@@ -2833,7 +2833,7 @@
Label object_has_monitor;
Label cas_failed;
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
@@ -2851,11 +2851,11 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- andi_(temp, displaced_header, markOopDesc::monitor_value);
+ andi_(temp, displaced_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// Load Compare Value application register.
@@ -2863,7 +2863,7 @@
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/displaced_header,
@@ -2883,10 +2883,10 @@
bind(cas_failed);
// We did not see an unlocked object so try the fast recursive case.
- // Check if the owner is self by comparing the value in the markOop of object
+ // Check if the owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
- load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(temp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, temp);
// If condition is true we are cont and hence we can store 0 as the
@@ -2910,7 +2910,7 @@
#endif // INCLUDE_RTM_OPT
// Try to CAS m->owner from NULL to current thread.
- addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
+ addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
@@ -2957,12 +2957,12 @@
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
- andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- bne(flag, L_regular_unlock); // else RegularLock
- tend_(); // otherwise end...
- b(cont); // ... and we're done
+ ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
+ andi(R0, current_header, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ bne(flag, L_regular_unlock); // else RegularLock
+ tend_(); // otherwise end...
+ b(cont); // ... and we're done
bind(L_regular_unlock);
}
#endif
@@ -2978,11 +2978,11 @@
// The object has an existing monitor iff (mark & monitor_value) != 0.
RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
- andi_(R0, current_header, markOopDesc::monitor_value);
+ andi_(R0, current_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
@@ -3000,7 +3000,7 @@
b(cont);
bind(object_has_monitor);
- addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
+ addi(current_header, current_header, -markWord::monitor_value); // monitor
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
// It's inflated.
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3820,7 +3820,7 @@
if (UseBiasedLocking) {
__ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
} else {
- __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
+ __ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
}
__ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -96,7 +96,7 @@
}
// and mark it as unlocked.
- z_oill(hdr, markOopDesc::unlocked_value);
+ z_oill(hdr, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Test if object header is still the same (i.e. unlocked), and if so, store the
@@ -115,19 +115,19 @@
// If the object header was not the same, it is now in the hdr register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
- // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
+ // 1) (hdr & markWord::lock_mask_in_place) == 0
// 2) rsp <= hdr
// 3) hdr <= rsp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
- // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
+ // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
z_sgr(hdr, Z_SP);
- load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
@@ -192,7 +192,7 @@
z_lg(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t.
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -41,14 +41,14 @@
void initialize_body(Register objectFields, Register len_in_bytes, Register Rzero);
// locking
- // hdr : Used to hold locked markOop to be CASed into obj, contents destroyed.
+ // hdr : Used to hold locked markWord to be CASed into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents preserved.
// Returns code offset at which to add null check debug information.
void lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
// unlocking
- // hdr : Used to hold original markOop to be CASed back into obj, contents destroyed.
+ // hdr : Used to hold original markWord to be CASed back into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents destroyed.
void unlock_object(Register hdr, Register obj, Register lock, Label& slow_case);
--- a/src/hotspot/cpu/s390/globals_s390.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/globals_s390.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -33,9 +33,6 @@
// (see globals.hpp)
// Sorted according to sparc.
-// z/Architecture remembers branch targets, so don't share vtables.
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -974,7 +974,7 @@
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -993,17 +993,17 @@
NearLabel done;
NearLabel slow_case;
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- z_oill(displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ z_oill(displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -1027,7 +1027,7 @@
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
z_sgr(current_header, Z_SP);
@@ -1035,7 +1035,7 @@
// The prior sequence "LGR, NGR, LTGR" can be done better
// (Z_R1 is temp and not used after here).
- load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
// If condition is true we are done and hence we can store 0 in the displaced
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3198,15 +3198,15 @@
// whether the epoch is still valid.
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits.
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
z_lr(temp_reg, mark_reg);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.
load_prototype_header(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
z_ogr(temp_reg, Z_thread);
z_xgr(temp_reg, mark_reg);
@@ -3232,7 +3232,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
z_brnaz(try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -3244,7 +3244,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
+ z_tmll(temp_reg, markWord::epoch_mask_in_place);
z_brnaz(try_rebias);
//----------------------------------------------------------------------------
@@ -3254,8 +3254,8 @@
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place);
z_lgr(temp_reg, Z_thread);
z_llgfr(mark_reg, mark_reg);
z_ogr(temp_reg, mark_reg);
@@ -3287,7 +3287,7 @@
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
load_prototype_header(temp_reg, obj_reg);
z_llgfr(mark_reg, mark_reg);
@@ -3348,9 +3348,9 @@
BLOCK_COMMENT("biased_locking_exit {");
z_lg(temp_reg, 0, mark_addr);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
-
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
+
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_bre(done);
BLOCK_COMMENT("} biased_locking_exit");
}
@@ -3363,7 +3363,7 @@
BLOCK_COMMENT("compiler_fast_lock_object {");
- // Load markOop from oop into mark.
+ // Load markWord from oop into mark.
z_lg(displacedHeader, 0, oop);
if (try_bias) {
@@ -3372,13 +3372,13 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
z_lr(temp, displacedHeader);
- z_nill(temp, markOopDesc::monitor_value);
+ z_nill(temp, markWord::monitor_value);
z_brne(object_has_monitor);
- // Set mark to markOop | markOopDesc::unlocked_value.
- z_oill(displacedHeader, markOopDesc::unlocked_value);
+ // Set mark to markWord | markWord::unlocked_value.
+ z_oill(displacedHeader, markWord::unlocked_value);
// Load Compare Value application register.
@@ -3386,7 +3386,7 @@
z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
// Memory Fence (in cmpxchgd)
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
// If the compare-and-swap succeeded, then we found an unlocked object and we
// have now locked it.
@@ -3397,7 +3397,7 @@
// We did not see an unlocked object so try the fast recursive case.
z_sgr(currentHeader, Z_SP);
- load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(currentHeader, temp);
// z_brne(done);
@@ -3407,7 +3407,7 @@
z_bru(done);
Register zero = temp;
- Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
+ Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL,
// otherwise m->owner may contain a thread or a stack address.
@@ -3456,12 +3456,12 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
- z_nill(currentHeader, markOopDesc::monitor_value);
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
+ z_nill(currentHeader, markWord::monitor_value);
z_brne(object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
// copy box to currentHeader such that csg does not kill it.
z_lgr(currentHeader, box);
z_csg(currentHeader, displacedHeader, 0, oop);
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3880,7 +3880,7 @@
__ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
} else {
__ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
- (long)markOopDesc::prototype());
+ (long)markWord::prototype().value());
}
__ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -97,12 +97,12 @@
mov(Rbox, Rscratch);
// and mark it unlocked
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
// save unlocked object header into the displaced header location on the stack
st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
- // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
+ // compare object markWord with Rmark and if equal exchange Rscratch with object markWord
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
@@ -144,7 +144,7 @@
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
cas_ptr(mark_addr.base(), Rbox, Rmark);
cmp(Rbox, Rmark);
@@ -179,7 +179,7 @@
if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else {
- set((intx)markOopDesc::prototype(), t1);
+ set((intx)markWord::prototype().value(), t1);
}
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
if (UseCompressedClassPointers) {
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -37,7 +37,6 @@
// the load of the dispatch address and hence the jmp would still go to the location
// according to the prior table. So, we let the thread continue and let it block by itself.
define_pd_global(bool, DontYieldALot, true); // yield no more than 100 times per second
-define_pd_global(bool, ShareVtableStubs, false); // improves performance markedly for mtrt and compress
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on sparc.
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1200,7 +1200,7 @@
assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
- // load markOop from object into mark_reg
+ // load markWord from object into mark_reg
ld_ptr(mark_addr, mark_reg);
if (UseBiasedLocking) {
@@ -1211,11 +1211,11 @@
// we need a temporary register here as we do not want to clobber lock_reg
// (cas clobbers the destination register)
mov(lock_reg, temp_reg);
- // set mark reg to be (markOop of object | UNLOCK_VALUE)
- or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
+ // set mark reg to be (markWord of object | UNLOCK_VALUE)
+ or3(mark_reg, markWord::unlocked_value, mark_reg);
// initialize the box (Must happen before we update the object mark!)
st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
- // compare and exchange object_addr, markOop | 1, stack address of basicLock
+ // compare and exchange object_addr, markWord | 1, stack address of basicLock
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -1224,7 +1224,7 @@
// We did not see an unlocked object so try the fast recursive case
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
sub(temp_reg, STACK_BIAS, temp_reg);
@@ -1234,7 +1234,7 @@
// (a) %sp -vs- markword proximity check, and,
// (b) verify mark word LSBs == 0 (Stack-locked).
//
- // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
+ // FFFFF003/FFFFFFFFFFFF003 is (markWord::lock_mask_in_place | -os::vm_page_size())
// Note that the page size used for %sp proximity testing is arbitrary and is
// unrelated to the actual MMU page size. We use a 'logical' page size of
// 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2452,15 +2452,15 @@
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ and3(mark_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp_and_brx_short(temp_reg, markWord::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
- andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
+ andcc(temp_reg, ~((int) markWord::age_mask_in_place), temp_reg);
if (counters != NULL) {
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
// Reload mark_reg as we may need it later
@@ -2483,7 +2483,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
+ btst(markWord::biased_lock_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -2495,7 +2495,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
+ delayed()->btst(markWord::epoch_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -2505,7 +2505,7 @@
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
delayed()->and3(mark_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
+ markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -2586,8 +2586,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ld_ptr(mark_addr, temp_reg);
- and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp(temp_reg, markOopDesc::biased_lock_pattern);
+ and3(temp_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp(temp_reg, markWord::biased_lock_pattern);
brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
delayed();
if (!allow_delay_slot_filling) {
@@ -2603,12 +2603,12 @@
// box->dhw disposition - post-conditions at DONE_LABEL.
// - Successful inflated lock: box->dhw != 0.
// Any non-zero value suffices.
-// Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
+// Consider G2_thread, rsp, boxReg, or markWord::unused_mark()
// - Successful Stack-lock: box->dhw == mark.
// box->dhw must contain the displaced mark word value
// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
// The slow-path fast_enter() and slow_enter() operators
-// are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
+// are responsible for setting box->dhw = NonZero (typically markWord::unused_mark()).
// - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2658,7 +2658,7 @@
// This presumes TSO, of course.
mov(0, Rscratch);
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
@@ -2712,7 +2712,7 @@
// set icc.zf : 1=success 0=failure
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
- // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+ // markWord::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
// Intentional fall-through into done
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1835,19 +1835,19 @@
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
__ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
- __ sethi(markOopDesc::hash_mask, mask);
- __ btst(markOopDesc::unlocked_value, header);
+ __ sethi(markWord::hash_mask, mask);
+ __ btst(markWord::unlocked_value, header);
__ br(Assembler::zero, false, Assembler::pn, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
__ delayed()->nop();
- __ btst(markOopDesc::biased_lock_bit_in_place, header);
+ __ btst(markWord::biased_lock_bit_in_place, header);
__ br(Assembler::notZero, false, Assembler::pn, slowCase);
}
- __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
+ __ delayed()->or3(mask, markWord::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value.
- __ srlx(header, markOopDesc::hash_shift, hash);
+ __ srlx(header, markWord::hash_shift, hash);
__ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop();
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3517,7 +3517,7 @@
if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else {
- __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
+ __ set((intptr_t)markWord::prototype().value(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
__ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1380,6 +1380,15 @@
emit_int8(0xC0 | encode);
}
+void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_vaes(), "requires vaes support/enabling");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xDC);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::aesenclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
@@ -1397,6 +1406,15 @@
emit_int8((unsigned char)(0xC0 | encode));
}
+void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_vaes(), "requires vaes support/enabling");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xDD);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::andl(Address dst, int32_t imm32) {
InstructionMark im(this);
prefix(dst);
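
The two new EVEX forms above encode VAESENC/VAESENCLAST (opcodes 0xDC/0xDD in the 0F 38 map), which perform one AES round on every 128-bit lane of the destination, so at AVX_512bit width they advance four independent blocks per instruction. A minimal usage sketch, assuming a MacroAssembler context in which src, dst and broadcast round keys in xmm8/xmm9/xmm10 have already been prepared (register choices and the abbreviated schedule are illustrative only):

    evmovdquq(xmm0, Address(src, 0), Assembler::AVX_512bit);  // load four 16-byte blocks
    evpxorq(xmm0, xmm0, xmm8, Assembler::AVX_512bit);         // initial AddRoundKey
    vaesenc(xmm0, xmm0, xmm9, Assembler::AVX_512bit);         // one full round in each lane
    vaesenclast(xmm0, xmm0, xmm10, Assembler::AVX_512bit);    // final round (no MixColumns)
    evmovdquq(Address(dst, 0), xmm0, Assembler::AVX_512bit);  // store four ciphertext blocks

Both emitters assert VM_Version::supports_vaes(), matching the stub-generation guard added further down in stubGenerator_x86_64.cpp.
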
--- a/src/hotspot/cpu/x86/assembler_x86.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -968,6 +968,9 @@
void aesenc(XMMRegister dst, XMMRegister src);
void aesenclast(XMMRegister dst, Address src);
void aesenclast(XMMRegister dst, XMMRegister src);
+ // Vector AES instructions
+ void vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -61,7 +61,7 @@
// Load object header
movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orptr(hdr, markOopDesc::unlocked_value);
+ orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
movptr(Address(disp_hdr, 0), hdr);
// test if object header is still the same (i.e. unlocked), and if so, store the
@@ -156,7 +156,7 @@
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t
- movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
}
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -350,9 +350,9 @@
Label done;
__ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ notptr(tmp);
- __ testb(tmp, markOopDesc::marked_value);
+ __ testb(tmp, markWord::marked_value);
__ jccb(Assembler::notZero, done);
- __ orptr(tmp, markOopDesc::marked_value);
+ __ orptr(tmp, markWord::marked_value);
__ notptr(tmp);
__ mov(dst, tmp);
__ bind(done);
@@ -807,8 +807,15 @@
__ mov(tmp1, res);
__ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
__ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+#ifdef _LP64
__ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
__ testbool(tmp2);
+#else
+ // On x86_32, C1 register allocator can give us the register without 8-bit support.
+ // Do the full-register access and test to avoid compilation failures.
+ __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1));
+ __ testptr(tmp2, 0xFF);
+#endif
__ jcc(Assembler::zero, *stub->continuation());
// Test if object is resolved.
@@ -816,10 +823,16 @@
// Test if both lowest bits are set. We trick it by negating the bits
// then test for both bits clear.
__ notptr(tmp1);
- __ testb(tmp1, markOopDesc::marked_value);
+#ifdef _LP64
+ __ testb(tmp1, markWord::marked_value);
+#else
+ // On x86_32, C1 register allocator can give us the register without 8-bit support.
+ // Do the full-register access and test to avoid compilation failures.
+ __ testptr(tmp1, markWord::marked_value);
+#endif
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
- __ orptr(tmp1, markOopDesc::marked_value);
+ __ orptr(tmp1, markWord::marked_value);
__ notptr(tmp1);
// At this point, tmp1 contains the decoded forwarding pointer.
__ mov(res, tmp1);
@@ -898,8 +911,8 @@
// arg0 : object to be resolved
__ save_live_registers_no_oop_map(true);
- __ load_parameter(0, c_rarg0);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0);
+ __ load_parameter(0, LP64_ONLY(c_rarg0) NOT_LP64(rax));
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), LP64_ONLY(c_rarg0) NOT_LP64(rax));
__ restore_live_registers_except_rax(true);
__ epilogue();
@@ -950,10 +963,10 @@
// Test if both lowest bits are set. We trick it by negating the bits
// then test for both bits clear.
__ notptr(tmp2);
- __ testb(tmp2, markOopDesc::marked_value);
+ __ testb(tmp2, markWord::marked_value);
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
- __ orptr(tmp2, markOopDesc::marked_value);
+ __ orptr(tmp2, markWord::marked_value);
__ notptr(tmp2);
// At this point, tmp2 contains the decoded forwarding pointer.
__ mov(rax, tmp2);
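
The _LP64/else splits above deal with an encoding limitation rather than a GC issue: without a REX prefix only eax, ebx, ecx and edx have byte-register forms, so a testb against whatever register C1 happened to allocate (esi or edi, for instance) cannot be encoded on x86_32. A hypothetical helper capturing the same pattern, shown only to make the trade-off explicit (it is not part of this change):

    // Hypothetical helper: test the low byte of a register that may have no 8-bit form on x86_32.
    static void test_low_byte(MacroAssembler* masm, Register reg, int bits) {
    #ifdef _LP64
      masm->testb(reg, bits);            // under REX every GPR has a byte form
    #else
      masm->testptr(reg, bits & 0xFF);   // full-width test, masked down to the low byte
    #endif
    }

The movbool/movptr split in the in_cset_fast_test read follows the same reasoning.
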
--- a/src/hotspot/cpu/x86/globals_x86.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/globals_x86.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -31,8 +31,6 @@
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1115,7 +1115,7 @@
assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
assert(tmp_reg != noreg, "tmp_reg must be supplied");
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
NOT_LP64( Address saved_mark_addr(lock_reg, 0); )
@@ -1135,8 +1135,8 @@
movptr(swap_reg, mark_addr);
}
movptr(tmp_reg, swap_reg);
- andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
+ andptr(tmp_reg, markWord::biased_lock_mask_in_place);
+ cmpptr(tmp_reg, markWord::biased_lock_pattern);
jcc(Assembler::notEqual, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
@@ -1162,7 +1162,7 @@
xorptr(swap_reg, tmp_reg);
Register header_reg = swap_reg;
#endif
- andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
+ andptr(header_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->biased_lock_entry_count_addr()));
@@ -1181,7 +1181,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
+ testptr(header_reg, markWord::biased_lock_mask_in_place);
jccb(Assembler::notZero, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -1193,7 +1193,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- testptr(header_reg, markOopDesc::epoch_mask_in_place);
+ testptr(header_reg, markWord::epoch_mask_in_place);
jccb(Assembler::notZero, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -1204,7 +1204,7 @@
// don't accidentally blow away another thread's valid bias.
NOT_LP64( movptr(swap_reg, saved_mark_addr); )
andptr(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
#ifdef _LP64
movptr(tmp_reg, swap_reg);
orptr(tmp_reg, r15_thread);
@@ -1298,8 +1298,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
+ andptr(temp_reg, markWord::biased_lock_mask_in_place);
+ cmpptr(temp_reg, markWord::biased_lock_pattern);
jcc(Assembler::equal, done);
}
@@ -1486,7 +1486,7 @@
bind(L_rtm_retry);
}
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
- testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -1501,8 +1501,8 @@
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
- andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
+ andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
@@ -1528,7 +1528,7 @@
// Use RTM for inflating locks
// inputs: objReg (object to lock)
// boxReg (on-stack box address (displaced header location) - KILLED)
-// tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
+// tmpReg (ObjectMonitor address + markWord::monitor_value)
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register retry_on_busy_count_Reg,
Register retry_on_abort_count_Reg,
@@ -1542,7 +1542,7 @@
int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
// Without cast to int32_t a movptr will destroy r10 which is typically obj
- movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+ movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
movptr(boxReg, tmpReg); // Save ObjectMonitor address
if (RTMRetryCount > 0) {
@@ -1748,11 +1748,11 @@
#endif // INCLUDE_RTM_OPT
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH]
- testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jccb(Assembler::notZero, IsInflated);
// Attempt stack-locking ...
- orptr (tmpReg, markOopDesc::unlocked_value);
+ orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
lock();
cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
@@ -1776,7 +1776,7 @@
jmp(DONE_LABEL);
bind(IsInflated);
- // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
+ // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value
#if INCLUDE_RTM_OPT
// Use the same RTM locking code in 32- and 64-bit VM.
@@ -1791,7 +1791,7 @@
// boxReg refers to the on-stack BasicLock in the current frame.
// We'd like to write:
- // set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
+ // set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
// This is convenient but results a ST-before-CAS penalty. The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain.
@@ -1836,9 +1836,9 @@
lock();
cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
- // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
+ // Unconditionally set box->_displaced_header = markWord::unused_mark().
// Without cast to int32_t movptr will destroy r10 which is typically obj.
- movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+ movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
// Intentional fall-through into DONE_LABEL ...
// Propagate ICC.ZF from CAS above into DONE_LABEL.
#endif // _LP64
@@ -1906,20 +1906,20 @@
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
- andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
- jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
- xend(); // otherwise end...
- jmp(DONE_LABEL); // ... and we're done
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
+ andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
+ jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
+ xend(); // otherwise end...
+ jmp(DONE_LABEL); // ... and we're done
bind(L_regular_unlock);
}
#endif
- cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
- jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
- movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
- testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
+ cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
+ jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
+ testptr(tmpReg, markWord::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
// It's inflated.
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -982,6 +982,17 @@
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
XMMRegister shuf_mask);
+private:
+ void roundEnc(XMMRegister key, int rnum);
+ void lastroundEnc(XMMRegister key, int rnum);
+ void roundDec(XMMRegister key, int rnum);
+ void lastroundDec(XMMRegister key, int rnum);
+ void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
+
+public:
+ void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
+ void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
+
#endif
void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
--- a/src/hotspot/cpu/x86/macroAssembler_x86_aes.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_aes.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -30,6 +30,463 @@
#include "macroAssembler_x86.hpp"
#ifdef _LP64
+
+void MacroAssembler::roundEnc(XMMRegister key, int rnum) {
+ for (int xmm_reg_no = 0; xmm_reg_no <=rnum; xmm_reg_no++) {
+ vaesenc(as_XMMRegister(xmm_reg_no), as_XMMRegister(xmm_reg_no), key, Assembler::AVX_512bit);
+ }
+}
+
+void MacroAssembler::lastroundEnc(XMMRegister key, int rnum) {
+ for (int xmm_reg_no = 0; xmm_reg_no <=rnum; xmm_reg_no++) {
+ vaesenclast(as_XMMRegister(xmm_reg_no), as_XMMRegister(xmm_reg_no), key, Assembler::AVX_512bit);
+ }
+}
+
+void MacroAssembler::roundDec(XMMRegister key, int rnum) {
+ for (int xmm_reg_no = 0; xmm_reg_no <=rnum; xmm_reg_no++) {
+ vaesdec(as_XMMRegister(xmm_reg_no), as_XMMRegister(xmm_reg_no), key, Assembler::AVX_512bit);
+ }
+}
+
+void MacroAssembler::lastroundDec(XMMRegister key, int rnum) {
+ for (int xmm_reg_no = 0; xmm_reg_no <=rnum; xmm_reg_no++) {
+ vaesdeclast(as_XMMRegister(xmm_reg_no), as_XMMRegister(xmm_reg_no), key, Assembler::AVX_512bit);
+ }
+}
+
+// Load key and shuffle operation
+void MacroAssembler::ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
+ movdqu(xmmdst, Address(key, offset));
+ if (xmm_shuf_mask != NULL) {
+ pshufb(xmmdst, xmm_shuf_mask);
+ } else {
+ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+ }
+ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
+}
+
+// AES-ECB Encrypt Operation
+void MacroAssembler::aesecb_encrypt(Register src_addr, Register dest_addr, Register key, Register len) {
+
+ const Register pos = rax;
+ const Register rounds = r12;
+
+ Label NO_PARTS, LOOP, Loop_start, LOOP2, AES192, END_LOOP, AES256, REMAINDER, LAST2, END, KEY_192, KEY_256, EXIT;
+ push(r13);
+ push(r12);
+
+ // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
+ // context for the registers used, where all instructions below are using 128-bit mode
+ // On EVEX without VL and BW, these instructions will all be AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ movl(rax, 0xffff);
+ kmovql(k1, rax);
+ }
+ push(len); // Save
+ push(rbx);
+
+ vzeroupper();
+
+ xorptr(pos, pos);
+
+ // Calculate the number of rounds based on key length (128, 192, 256): 44 for 10 rounds, 52 for 12 rounds, 60 for 14 rounds
+ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+
+ // Load Key shuf mask
+ const XMMRegister xmm_key_shuf_mask = xmm31; // used temporarily to swap key bytes up front
+ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+
+ // Load and shuffle key based on number of rounds
+ ev_load_key(xmm8, key, 0 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm9, key, 1 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm10, key, 2 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm23, key, 3 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm12, key, 4 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm13, key, 5 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm14, key, 6 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm15, key, 7 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm16, key, 8 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm17, key, 9 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm24, key, 10 * 16, xmm_key_shuf_mask);
+ cmpl(rounds, 52);
+ jcc(Assembler::greaterEqual, KEY_192);
+ jmp(Loop_start);
+
+ bind(KEY_192);
+ ev_load_key(xmm19, key, 11 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm20, key, 12 * 16, xmm_key_shuf_mask);
+ cmpl(rounds, 60);
+ jcc(Assembler::equal, KEY_256);
+ jmp(Loop_start);
+
+ bind(KEY_256);
+ ev_load_key(xmm21, key, 13 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm22, key, 14 * 16, xmm_key_shuf_mask);
+
+ bind(Loop_start);
+ movq(rbx, len);
+ // Divide length by 16 to convert it to number of blocks
+ shrq(len, 4);
+ shlq(rbx, 60);
+ jcc(Assembler::equal, NO_PARTS);
+ addq(len, 1);
+ // Check if number of blocks is greater than or equal to 32
+ // If true, 512 bytes are processed at a time (code marked by label LOOP)
+ // If not, 16 bytes are processed (code marked by REMAINDER label)
+ bind(NO_PARTS);
+ movq(rbx, len);
+ shrq(len, 5);
+ jcc(Assembler::equal, REMAINDER);
+ movl(r13, len);
+ // Compute number of blocks that will be processed 512 bytes at a time
+ // Subtract this from the total number of blocks which will then be processed by REMAINDER loop
+ shlq(r13, 5);
+ subq(rbx, r13);
+ // Begin processing 512 bytes
+ bind(LOOP);
+ // Move 64 bytes of PT data into a zmm register, as a result 512 bytes of PT loaded in zmm0-7
+ evmovdquq(xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm4, Address(src_addr, pos, Address::times_1, 4 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm5, Address(src_addr, pos, Address::times_1, 5 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm6, Address(src_addr, pos, Address::times_1, 6 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm7, Address(src_addr, pos, Address::times_1, 7 * 64), Assembler::AVX_512bit);
+ // Xor with the first round key
+ evpxorq(xmm0, xmm0, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm1, xmm1, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm2, xmm2, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm3, xmm3, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm4, xmm4, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm5, xmm5, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm6, xmm6, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm7, xmm7, xmm8, Assembler::AVX_512bit);
+ // 9 Aes encode round operations
+ roundEnc(xmm9, 7);
+ roundEnc(xmm10, 7);
+ roundEnc(xmm23, 7);
+ roundEnc(xmm12, 7);
+ roundEnc(xmm13, 7);
+ roundEnc(xmm14, 7);
+ roundEnc(xmm15, 7);
+ roundEnc(xmm16, 7);
+ roundEnc(xmm17, 7);
+ cmpl(rounds, 52);
+ jcc(Assembler::aboveEqual, AES192);
+ // Aesenclast round operation for keysize = 128
+ lastroundEnc(xmm24, 7);
+ jmp(END_LOOP);
+ // Additional 2 rounds of Aesenc operation for keysize = 192
+ bind(AES192);
+ roundEnc(xmm24, 7);
+ roundEnc(xmm19, 7);
+ cmpl(rounds, 60);
+ jcc(Assembler::aboveEqual, AES256);
+ // Aesenclast round for keysize = 192
+ lastroundEnc(xmm20, 7);
+ jmp(END_LOOP);
+ // 2 rounds of Aesenc operation and Aesenclast for keysize = 256
+ bind(AES256);
+ roundEnc(xmm20, 7);
+ roundEnc(xmm21, 7);
+ lastroundEnc(xmm22, 7);
+
+ bind(END_LOOP);
+ // Move 512 bytes of CT to destination
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 0 * 64), xmm0, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 4 * 64), xmm4, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 5 * 64), xmm5, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 6 * 64), xmm6, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 7 * 64), xmm7, Assembler::AVX_512bit);
+
+ addq(pos, 512);
+ decq(len);
+ jcc(Assembler::notEqual, LOOP);
+
+ bind(REMAINDER);
+ vzeroupper();
+ cmpq(rbx, 0);
+ jcc(Assembler::equal, END);
+ // Process 16 bytes at a time
+ bind(LOOP2);
+ movdqu(xmm1, Address(src_addr, pos, Address::times_1, 0));
+ vpxor(xmm1, xmm1, xmm8, Assembler::AVX_128bit);
+ // xmm2 contains shuffled key for Aesenclast operation.
+ vmovdqu(xmm2, xmm24);
+
+ vaesenc(xmm1, xmm1, xmm9, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm10, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm23, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm12, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm13, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm14, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm15, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm16, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm17, Assembler::AVX_128bit);
+
+ cmpl(rounds, 52);
+ jcc(Assembler::below, LAST2);
+ vmovdqu(xmm2, xmm20);
+ vaesenc(xmm1, xmm1, xmm24, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm19, Assembler::AVX_128bit);
+ cmpl(rounds, 60);
+ jcc(Assembler::below, LAST2);
+ vmovdqu(xmm2, xmm22);
+ vaesenc(xmm1, xmm1, xmm20, Assembler::AVX_128bit);
+ vaesenc(xmm1, xmm1, xmm21, Assembler::AVX_128bit);
+
+ bind(LAST2);
+ // Aesenclast round
+ vaesenclast(xmm1, xmm1, xmm2, Assembler::AVX_128bit);
+ // Write 16 bytes of CT to destination
+ movdqu(Address(dest_addr, pos, Address::times_1, 0), xmm1);
+ addq(pos, 16);
+ decq(rbx);
+ jcc(Assembler::notEqual, LOOP2);
+
+ bind(END);
+ // Zero out the round keys
+ evpxorq(xmm8, xmm8, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm9, xmm9, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm10, xmm10, xmm10, Assembler::AVX_512bit);
+ evpxorq(xmm23, xmm23, xmm23, Assembler::AVX_512bit);
+ evpxorq(xmm12, xmm12, xmm12, Assembler::AVX_512bit);
+ evpxorq(xmm13, xmm13, xmm13, Assembler::AVX_512bit);
+ evpxorq(xmm14, xmm14, xmm14, Assembler::AVX_512bit);
+ evpxorq(xmm15, xmm15, xmm15, Assembler::AVX_512bit);
+ evpxorq(xmm16, xmm16, xmm16, Assembler::AVX_512bit);
+ evpxorq(xmm17, xmm17, xmm17, Assembler::AVX_512bit);
+ evpxorq(xmm24, xmm24, xmm24, Assembler::AVX_512bit);
+ cmpl(rounds, 44);
+ jcc(Assembler::belowEqual, EXIT);
+ evpxorq(xmm19, xmm19, xmm19, Assembler::AVX_512bit);
+ evpxorq(xmm20, xmm20, xmm20, Assembler::AVX_512bit);
+ cmpl(rounds, 52);
+ jcc(Assembler::belowEqual, EXIT);
+ evpxorq(xmm21, xmm21, xmm21, Assembler::AVX_512bit);
+ evpxorq(xmm22, xmm22, xmm22, Assembler::AVX_512bit);
+ bind(EXIT);
+ pop(rbx);
+ pop(rax); // return length
+ pop(r12);
+ pop(r13);
+}
+
+// AES-ECB Decrypt Operation
+void MacroAssembler::aesecb_decrypt(Register src_addr, Register dest_addr, Register key, Register len) {
+
+ Label NO_PARTS, LOOP, Loop_start, LOOP2, AES192, END_LOOP, AES256, REMAINDER, LAST2, END, KEY_192, KEY_256, EXIT;
+ const Register pos = rax;
+ const Register rounds = r12;
+ push(r13);
+ push(r12);
+
+ // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
+ // context for the registers used, where all instructions below are using 128-bit mode
+ // On EVEX without VL and BW, these instructions will all be AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ movl(rax, 0xffff);
+ kmovql(k1, rax);
+ }
+
+ push(len); // Save
+ push(rbx);
+
+ vzeroupper();
+
+ xorptr(pos, pos);
+ // Calculate the number of rounds based on key length (128, 192, 256): 44 for 10 rounds, 52 for 12 rounds, 60 for 14 rounds
+ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
+
+ // Load Key shuf mask
+ const XMMRegister xmm_key_shuf_mask = xmm31; // used temporarily to swap key bytes up front
+ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
+
+ // Load and shuffle round keys. The java expanded key ordering is rotated one position in decryption.
+ // So the first round key is loaded from 1*16 here and last round key is loaded from 0*16
+ ev_load_key(xmm9, key, 1 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm10, key, 2 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm11, key, 3 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm12, key, 4 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm13, key, 5 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm14, key, 6 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm15, key, 7 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm16, key, 8 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm17, key, 9 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm18, key, 10 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm27, key, 0 * 16, xmm_key_shuf_mask);
+ cmpl(rounds, 52);
+ jcc(Assembler::greaterEqual, KEY_192);
+ jmp(Loop_start);
+
+ bind(KEY_192);
+ ev_load_key(xmm19, key, 11 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm20, key, 12 * 16, xmm_key_shuf_mask);
+ cmpl(rounds, 60);
+ jcc(Assembler::equal, KEY_256);
+ jmp(Loop_start);
+
+ bind(KEY_256);
+ ev_load_key(xmm21, key, 13 * 16, xmm_key_shuf_mask);
+ ev_load_key(xmm22, key, 14 * 16, xmm_key_shuf_mask);
+ bind(Loop_start);
+ movq(rbx, len);
+ // Convert input length to number of blocks
+ shrq(len, 4);
+ shlq(rbx, 60);
+ jcc(Assembler::equal, NO_PARTS);
+ addq(len, 1);
+ // Check if the number of blocks is greater than or equal to 32
+ // If true, 512 bytes are processed at a time (code marked by label LOOP)
+ // If not, 16 bytes are processed (code marked by label REMAINDER)
+ bind(NO_PARTS);
+ movq(rbx, len);
+ shrq(len, 5);
+ jcc(Assembler::equal, REMAINDER);
+ movl(r13, len);
+ // Compute number of blocks that will be processed as 512 bytes at a time
+ // Subtract this from the total number of blocks, which will then be processed by REMAINDER loop.
+ shlq(r13, 5);
+ subq(rbx, r13);
+
+ bind(LOOP);
+ // Move 64 bytes of CT data into a zmm register, as a result 512 bytes of CT loaded in zmm0-7
+ evmovdquq(xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm4, Address(src_addr, pos, Address::times_1, 4 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm5, Address(src_addr, pos, Address::times_1, 5 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm6, Address(src_addr, pos, Address::times_1, 6 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm7, Address(src_addr, pos, Address::times_1, 7 * 64), Assembler::AVX_512bit);
+ // Xor with the first round key
+ evpxorq(xmm0, xmm0, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm1, xmm1, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm2, xmm2, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm3, xmm3, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm4, xmm4, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm5, xmm5, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm6, xmm6, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm7, xmm7, xmm9, Assembler::AVX_512bit);
+ // 9 rounds of Aesdec
+ roundDec(xmm10, 7);
+ roundDec(xmm11, 7);
+ roundDec(xmm12, 7);
+ roundDec(xmm13, 7);
+ roundDec(xmm14, 7);
+ roundDec(xmm15, 7);
+ roundDec(xmm16, 7);
+ roundDec(xmm17, 7);
+ roundDec(xmm18, 7);
+ cmpl(rounds, 52);
+ jcc(Assembler::aboveEqual, AES192);
+ // Aesdeclast round for keysize = 128
+ lastroundDec(xmm27, 7);
+ jmp(END_LOOP);
+
+ bind(AES192);
+ // 2 Additional rounds for keysize = 192
+ roundDec(xmm19, 7);
+ roundDec(xmm20, 7);
+ cmpl(rounds, 60);
+ jcc(Assembler::aboveEqual, AES256);
+ // Aesdeclast round for keysize = 192
+ lastroundDec(xmm27, 7);
+ jmp(END_LOOP);
+ bind(AES256);
+ // 2 Additional rounds and Aesdeclast for keysize = 256
+ roundDec(xmm21, 7);
+ roundDec(xmm22, 7);
+ lastroundDec(xmm27, 7);
+
+ bind(END_LOOP);
+ // Write 512 bytes of PT to the destination
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 0 * 64), xmm0, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 4 * 64), xmm4, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 5 * 64), xmm5, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 6 * 64), xmm6, Assembler::AVX_512bit);
+ evmovdquq(Address(dest_addr, pos, Address::times_1, 7 * 64), xmm7, Assembler::AVX_512bit);
+
+ addq(pos, 512);
+ decq(len);
+ jcc(Assembler::notEqual, LOOP);
+
+ bind(REMAINDER);
+ vzeroupper();
+ cmpq(rbx, 0);
+ jcc(Assembler::equal, END);
+ // Process 16 bytes at a time
+ bind(LOOP2);
+ movdqu(xmm1, Address(src_addr, pos, Address::times_1, 0));
+ vpxor(xmm1, xmm1, xmm9, Assembler::AVX_128bit);
+ // xmm2 contains shuffled key for Aesdeclast operation.
+ vmovdqu(xmm2, xmm27);
+
+ vaesdec(xmm1, xmm1, xmm10, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm11, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm12, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm13, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm14, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm15, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm16, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm17, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm18, Assembler::AVX_128bit);
+
+ cmpl(rounds, 52);
+ jcc(Assembler::below, LAST2);
+ vaesdec(xmm1, xmm1, xmm19, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm20, Assembler::AVX_128bit);
+ cmpl(rounds, 60);
+ jcc(Assembler::below, LAST2);
+ vaesdec(xmm1, xmm1, xmm21, Assembler::AVX_128bit);
+ vaesdec(xmm1, xmm1, xmm22, Assembler::AVX_128bit);
+
+ bind(LAST2);
+ // Aesdeclast round
+ vaesdeclast(xmm1, xmm1, xmm2, Assembler::AVX_128bit);
+ // Write 16 bytes of PT to destination
+ movdqu(Address(dest_addr, pos, Address::times_1, 0), xmm1);
+ addq(pos, 16);
+ decq(rbx);
+ jcc(Assembler::notEqual, LOOP2);
+
+ bind(END);
+ // Zero out the round keys
+ evpxorq(xmm8, xmm8, xmm8, Assembler::AVX_512bit);
+ evpxorq(xmm9, xmm9, xmm9, Assembler::AVX_512bit);
+ evpxorq(xmm10, xmm10, xmm10, Assembler::AVX_512bit);
+ evpxorq(xmm11, xmm11, xmm11, Assembler::AVX_512bit);
+ evpxorq(xmm12, xmm12, xmm12, Assembler::AVX_512bit);
+ evpxorq(xmm13, xmm13, xmm13, Assembler::AVX_512bit);
+ evpxorq(xmm14, xmm14, xmm14, Assembler::AVX_512bit);
+ evpxorq(xmm15, xmm15, xmm15, Assembler::AVX_512bit);
+ evpxorq(xmm16, xmm16, xmm16, Assembler::AVX_512bit);
+ evpxorq(xmm17, xmm17, xmm17, Assembler::AVX_512bit);
+ evpxorq(xmm18, xmm18, xmm18, Assembler::AVX_512bit);
+ evpxorq(xmm27, xmm27, xmm27, Assembler::AVX_512bit);
+ cmpl(rounds, 44);
+ jcc(Assembler::belowEqual, EXIT);
+ evpxorq(xmm19, xmm19, xmm19, Assembler::AVX_512bit);
+ evpxorq(xmm20, xmm20, xmm20, Assembler::AVX_512bit);
+ cmpl(rounds, 52);
+ jcc(Assembler::belowEqual, EXIT);
+ evpxorq(xmm21, xmm21, xmm21, Assembler::AVX_512bit);
+ evpxorq(xmm22, xmm22, xmm22, Assembler::AVX_512bit);
+ bind(EXIT);
+ pop(rbx);
+ pop(rax); // return length
+ pop(r12);
+ pop(r13);
+}
+
// Multiply 128 x 128 bits, using 4 pclmulqdq operations
void MacroAssembler::schoolbookAAD(int i, Register htbl, XMMRegister data,
XMMRegister tmp0, XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3) {
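
Both new routines read the round count from the length of the Java int[] that holds the expanded key: the AES key schedule produces 4*(rounds+1) 32-bit words, so lengths of 44, 52 and 60 select 10, 12 and 14 rounds for AES-128/192/256, which is what the cmpl(rounds, 52) and cmpl(rounds, 60) checks above key off. A standalone sketch of that mapping (illustrative only, not part of the patch):

    // Map the expanded-key length in ints to the AES round count,
    // assuming the standard 4 * (rounds + 1) expansion.
    static int rounds_from_key_ints(int key_ints) {
      return key_ints / 4 - 1;   // 44 -> 10, 52 -> 12, 60 -> 14
    }
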
--- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -59,12 +59,12 @@
__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// check if locked
- __ testptr(result, markOopDesc::unlocked_value);
+ __ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
- __ testptr(result, markOopDesc::biased_lock_bit_in_place);
+ __ testptr(result, markWord::biased_lock_bit_in_place);
__ jcc(Assembler::notZero, slowCase);
}
@@ -73,16 +73,16 @@
// Read the header and build a mask to get its hash field.
// Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
// because it could be larger than 32 bits in a 64-bit vm. See markOop.hpp.
- __ shrptr(result, markOopDesc::hash_shift);
- __ andptr(result, markOopDesc::hash_mask);
+ __ shrptr(result, markWord::hash_shift);
+ __ andptr(result, markWord::hash_mask);
#else
- __ andptr(result, markOopDesc::hash_mask_in_place);
+ __ andptr(result, markWord::hash_mask_in_place);
#endif //_LP64
// test if hashCode exists
__ jcc(Assembler::zero, slowCase);
#ifndef _LP64
- __ shrptr(result, markOopDesc::hash_shift);
+ __ shrptr(result, markWord::hash_shift);
#endif
__ ret(0);
__ bind(slowCase);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3685,6 +3685,36 @@
return start;
}
+ address generate_electronicCodeBook_encryptAESCrypt() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
+ address start = __ pc();
+ const Register from = c_rarg0; // source array address
+ const Register to = c_rarg1; // destination array address
+ const Register key = c_rarg2; // key array address
+ const Register len = c_rarg3; // src len (must be multiple of blocksize 16)
+ __ enter(); // required for proper stackwalking of RuntimeStub frame
+ __ aesecb_encrypt(from, to, key, len);
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ ret(0);
+ return start;
+ }
+
+ address generate_electronicCodeBook_decryptAESCrypt() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
+ address start = __ pc();
+ const Register from = c_rarg0; // source array address
+ const Register to = c_rarg1; // destination array address
+ const Register key = c_rarg2; // key array address
+ const Register len = c_rarg3; // src len (must be multiple of blocksize 16)
+ __ enter(); // required for proper stackwalking of RuntimeStub frame
+ __ aesecb_decrypt(from, to, key, len);
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ ret(0);
+ return start;
+ }
+
address generate_upper_word_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
@@ -5979,6 +6009,8 @@
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
+ StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt();
+ StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt();
} else {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -4108,7 +4108,7 @@
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
- (intptr_t)markOopDesc::prototype()); // header
+ (intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
}
#ifdef _LP64
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -273,12 +273,12 @@
if (method->is_synchronized()) {
monitor = (BasicObjectLock*) istate->stack_base();
oop lockee = monitor->obj();
- markOop disp = lockee->mark()->set_unlocked();
+ markWord disp = lockee->mark().set_unlocked();
monitor->lock()->set_displaced_header(disp);
- if (lockee->cas_set_mark((markOop)monitor, disp) != disp) {
- if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
- monitor->lock()->set_displaced_header(NULL);
+ if (lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) {
+ if (thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) {
+ monitor->lock()->set_displaced_header(markWord::from_pointer(NULL));
}
else {
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
@@ -413,12 +413,12 @@
// Unlock if necessary
if (monitor) {
BasicLock *lock = monitor->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
oop rcvr = monitor->obj();
monitor->set_obj(NULL);
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
monitor->set_obj(rcvr); {
HandleMark hm(thread);
--- a/src/hotspot/cpu/zero/globals_zero.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/cpu/zero/globals_zero.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -32,8 +32,6 @@
// Set the default values for platform dependent flags used by the
// runtime system. See globals.hpp for details of what they do.
-define_pd_global(bool, ShareVtableStubs, true);
-
define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true);
--- a/src/hotspot/os/windows/os_windows.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/os/windows/os_windows.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -4122,7 +4122,7 @@
// in order to forward implicit exceptions from code in AOT
// generated DLLs. This is necessary since these DLLs are not
// registered for structured exceptions like codecache methods are.
- if (UseAOT) {
+ if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
}
#endif
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -532,6 +532,8 @@
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_decryptBlock", address, StubRoutines::_aescrypt_decryptBlock);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_encryptAESCrypt", address, StubRoutines::_cipherBlockChaining_encryptAESCrypt);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_decryptAESCrypt", address, StubRoutines::_cipherBlockChaining_decryptAESCrypt);
+ SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_electronicCodeBook_encryptAESCrypt", address, StubRoutines::_electronicCodeBook_encryptAESCrypt);
+ SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_electronicCodeBook_decryptAESCrypt", address, StubRoutines::_electronicCodeBook_decryptAESCrypt);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_update_bytes_crc32", address, StubRoutines::_updateBytesCRC32);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_crc_table_adr", address, StubRoutines::_crc_table_adr);
--- a/src/hotspot/share/ci/ciMethod.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/ci/ciMethod.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -111,7 +111,7 @@
_can_be_parsed = false;
}
} else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ DEBUG_ONLY(CompilerThread::current()->check_possible_safepoint());
}
if (h_m()->method_holder()->is_linked()) {
--- a/src/hotspot/share/classfile/altHashing.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/classfile/altHashing.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -35,8 +35,8 @@
// objects. We don't want to call the synchronizer hash code to install
// this value because it may safepoint.
static intptr_t object_hash(Klass* k) {
- intptr_t hc = k->java_mirror()->mark()->hash();
- return hc != markOopDesc::no_hash ? hc : os::random();
+ intptr_t hc = k->java_mirror()->mark().hash();
+ return hc != markWord::no_hash ? hc : os::random();
}
// Seed value used for each alternative hash calculated.
--- a/src/hotspot/share/classfile/javaClasses.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/classfile/javaClasses.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2677,14 +2677,14 @@
Method* method = java_lang_StackFrameInfo::get_method(stackFrame, holder, CHECK);
short version = stackFrame->short_field(_version_offset);
- short bci = stackFrame->short_field(_bci_offset);
+ int bci = stackFrame->int_field(_bci_offset);
Symbol* name = method->name();
java_lang_StackTraceElement::fill_in(stack_trace_element, holder, method, version, bci, name, CHECK);
}
#define STACKFRAMEINFO_FIELDS_DO(macro) \
macro(_memberName_offset, k, "memberName", object_signature, false); \
- macro(_bci_offset, k, "bci", short_signature, false)
+ macro(_bci_offset, k, "bci", int_signature, false)
void java_lang_StackFrameInfo::compute_offsets() {
InstanceKlass* k = SystemDictionary::StackFrameInfo_klass();
@@ -4224,6 +4224,7 @@
}
void java_lang_StackFrameInfo::set_bci(oop element, int value) {
+ assert(value >= 0 && value < max_jushort, "must be a valid bci value");
element->int_field_put(_bci_offset, value);
}
--- a/src/hotspot/share/classfile/systemDictionary.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2154,7 +2154,7 @@
// NOTE that we must only do this when the class is initially
// defined, not each time it is referenced from a new class loader
if (oopDesc::equals(k->class_loader(), class_loader())) {
- k->set_prototype_header(markOopDesc::biased_locking_prototype());
+ k->set_prototype_header(markWord::biased_locking_prototype());
}
}
--- a/src/hotspot/share/classfile/vmSymbols.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/classfile/vmSymbols.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -463,6 +463,8 @@
switch (id) {
case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
case vmIntrinsics::_counterMode_AESCrypt:
return 1;
case vmIntrinsics::_digestBase_implCompressMB:
@@ -736,6 +738,10 @@
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
if (!UseAESIntrinsics) return true;
break;
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
+ if (!UseAESIntrinsics) return true;
+ break;
case vmIntrinsics::_counterMode_AESCrypt:
if (!UseAESCTRIntrinsics) return true;
break;
--- a/src/hotspot/share/classfile/vmSymbols.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/classfile/vmSymbols.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -1020,6 +1020,12 @@
do_name( decrypt_name, "implDecrypt") \
do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \
\
+ do_class(com_sun_crypto_provider_electronicCodeBook, "com/sun/crypto/provider/ElectronicCodeBook") \
+ do_intrinsic(_electronicCodeBook_encryptAESCrypt, com_sun_crypto_provider_electronicCodeBook, ecb_encrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
+ do_intrinsic(_electronicCodeBook_decryptAESCrypt, com_sun_crypto_provider_electronicCodeBook, ecb_decrypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
+ do_name(ecb_encrypt_name, "implECBEncrypt") \
+ do_name(ecb_decrypt_name, "implECBDecrypt") \
+ \
do_class(com_sun_crypto_provider_counterMode, "com/sun/crypto/provider/CounterMode") \
do_intrinsic(_counterMode_AESCrypt, com_sun_crypto_provider_counterMode, crypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_name( crypt_name, "implCrypt") \
--- a/src/hotspot/share/code/nmethod.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/code/nmethod.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2268,7 +2268,6 @@
if (!is_not_installed()) {
if (CompiledICLocker::is_safe(this)) {
CompiledIC_at(this, call_site);
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
} else {
CompiledICLocker ml_verify(this);
CompiledIC_at(this, call_site);
--- a/src/hotspot/share/code/vtableStubs.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/code/vtableStubs.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -213,7 +213,7 @@
VtableStub* s;
{
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
- s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
+ s = lookup(is_vtable_stub, vtable_index);
if (s == NULL) {
if (is_vtable_stub) {
s = create_vtable_stub(vtable_index);
@@ -234,7 +234,8 @@
}
// Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released
- // all locks.
+ // all locks. Only post this event if a new state is not required. Creating a new state would
+ // cause a safepoint and the caller of this code has a NoSafepointVerifier.
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
s->code_begin(), s->code_end());
--- a/src/hotspot/share/compiler/compileBroker.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/compiler/compileBroker.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1546,7 +1546,6 @@
assert(task->is_blocking(), "can only wait on blocking task");
JavaThread* thread = JavaThread::current();
- thread->set_blocked_on_compilation(true);
methodHandle method(thread, task->method());
bool free_task;
@@ -1564,7 +1563,6 @@
}
}
- thread->set_blocked_on_compilation(false);
if (free_task) {
if (is_compilation_disabled_forever()) {
CompileTask::free(task);
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -62,7 +62,7 @@
}
size_t used_in_bytes() {
- return _space->used();
+ return _space->used_stable();
}
};
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -372,6 +372,8 @@
)
}
_dictionary->set_par_lock(&_parDictionaryAllocLock);
+
+ _used_stable = 0;
}
// Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -577,6 +579,14 @@
return capacity() - free();
}
+size_t CompactibleFreeListSpace::used_stable() const {
+ return _used_stable;
+}
+
+void CompactibleFreeListSpace::recalculate_used_stable() {
+ _used_stable = used();
+}
+
size_t CompactibleFreeListSpace::free() const {
// "MT-safe, but not MT-precise"(TM), if you will: i.e.
// if you do this while the structures are in flux you
@@ -1374,6 +1384,13 @@
debug_only(fc->mangleAllocated(size));
}
+ // During GC we do not need to recalculate the stable used value for
+ // every allocation in old gen. It is done once at the end of GC instead
+ // for performance reasons.
+ if (!CMSHeap::heap()->is_gc_active()) {
+ recalculate_used_stable();
+ }
+
return res;
}
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -192,6 +192,9 @@
// Used to keep track of limit of sweep for the space
HeapWord* _sweep_limit;
+ // Stable value of used().
+ size_t _used_stable;
+
// Used to make the young collector update the mod union table
MemRegionClosure* _preconsumptionDirtyCardClosure;
@@ -412,6 +415,17 @@
// which overestimates the region by returning the entire
// committed region (this is safe, but inefficient).
+ // Returns monotonically increasing stable used space bytes for CMS.
+ // This is required for jstat and other memory monitoring tools
+ // that might otherwise see inconsistent used space values during a garbage
+ // collection, promotion or allocation into compactibleFreeListSpace.
+ // The value returned by this function might be smaller than the
+ // actual value.
+ size_t used_stable() const;
+ // Recalculate and cache the current stable used() value. Only to be called
+ // in places where we can be sure that the result is stable.
+ void recalculate_used_stable();
+
// Returns a subregion of the space containing all the objects in
// the space.
MemRegion used_region() const {
@@ -736,7 +750,7 @@
size_t PromotionInfo::refillSize() const {
const size_t CMSSpoolBlockSize = 256;
- const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
+ const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
* CMSSpoolBlockSize);
return CompactibleFreeListSpace::adjustObjectSize(sz);
}
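
Taken together, the hpp and cpp hunks above cache a copy of used() that is refreshed only where the free lists are known to be consistent, so monitoring callers never observe a half-updated walk of the dictionaries. A distilled sketch of the discipline, with simplified names that are not the actual class shape:

    // Simplified illustration of the caching pattern behind used_stable().
    class StableUsedCache {
      size_t _used_stable;                 // refreshed only at consistent points
      size_t compute_used() const;         // expensive, and racy during a concurrent sweep
    public:
      size_t used_stable() const { return _used_stable; }               // cheap read for jstat-style tools
      void recalculate_used_stable() { _used_stable = compute_used(); } // called at GC phase boundaries
    };

The call sites added in the next file (concurrentMarkSweepGeneration.cpp) show where those consistent points are: the GC prologue/epilogue and the ends of the initial-mark, final-mark and sweep phases.
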
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -692,6 +692,10 @@
return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}
+size_t ConcurrentMarkSweepGeneration::used_stable() const {
+ return cmsSpace()->used_stable();
+}
+
size_t ConcurrentMarkSweepGeneration::max_available() const {
return free() + _virtual_space.uncommitted_size();
}
@@ -1010,7 +1014,7 @@
// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
- oop old, markOop m,
+ oop old, markWord m,
size_t word_sz) {
#ifndef PRODUCT
if (CMSHeap::heap()->promotion_should_fail()) {
@@ -1523,6 +1527,8 @@
FreelistLocker z(this);
MetaspaceGC::compute_new_size();
_cmsGen->compute_new_size_free_list();
+ // recalculate CMS used space after CMS collection
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
// A work method used by the foreground collector to do
@@ -2051,6 +2057,7 @@
_capacity_at_prologue = capacity();
_used_at_prologue = used();
+ _cmsSpace->recalculate_used_stable();
// We enable promotion tracking so that card-scanning can recognize
// which objects have been promoted during this GC and skip them.
@@ -2123,6 +2130,7 @@
_eden_chunk_index = 0;
size_t cms_used = _cmsGen->cmsSpace()->used();
+ _cmsGen->cmsSpace()->recalculate_used_stable();
// update performance counters - this uses a special version of
// update_counters() that allows the utilization to be passed as a
@@ -2816,6 +2824,8 @@
rp->enable_discovery();
_collectorState = Marking;
}
+
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
void CMSCollector::checkpointRootsInitialWork() {
@@ -4177,6 +4187,7 @@
MutexLocker y(bitMapLock(),
Mutex::_no_safepoint_check_flag);
checkpointRootsFinalWork();
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
verify_work_stacks_empty();
verify_overflow_empty();
@@ -5336,9 +5347,14 @@
// further below.
{
CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
+
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
+
+ // recalculate CMS used space after CMS collection
+ _cmsGen->cmsSpace()->recalculate_used_stable();
+
_collectorState = Resizing;
}
}
@@ -5427,6 +5443,7 @@
// Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used());
}
+ _cmsSpace->recalculate_used_stable();
}
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
@@ -7776,10 +7793,10 @@
assert(stack->capacity() > num, "Shouldn't bite more than can chew");
size_t i = num;
oop cur = _overflow_list;
- const markOop proto = markOopDesc::prototype();
+ const markWord proto = markWord::prototype();
NOT_PRODUCT(ssize_t n = 0;)
for (oop next; i > 0 && cur != NULL; cur = next, i--) {
- next = oop(cur->mark_raw());
+ next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = stack->push(cur);
@@ -7863,8 +7880,8 @@
size_t i = num;
oop cur = prefix;
// Walk down the first "num" objects, unless we reach the end.
- for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
- if (cur->mark_raw() == NULL) {
+ for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
+ if (cur->mark_raw().to_pointer() == NULL) {
// We have "num" or fewer elements in the list, so there
// is nothing to return to the global list.
// Write back the NULL in lieu of the BUSY we wrote
@@ -7874,9 +7891,9 @@
}
} else {
// Chop off the suffix and return it to the global list.
- assert(cur->mark_raw() != BUSY, "Error");
- oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
- cur->set_mark_raw(NULL); // break off suffix
+ assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
+ oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
+ cur->set_mark_raw(markWord::from_pointer(NULL)); // break off suffix
// It's possible that the list is still in the empty(busy) state
// we left it in a short while ago; in that case we may be
// able to place back the suffix without incurring the cost
@@ -7896,18 +7913,18 @@
// Too bad, someone else sneaked in (at least) an element; we'll need
// to do a splice. Find tail of suffix so we can prepend suffix to global
// list.
- for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
+ for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
oop suffix_tail = cur;
- assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
+ assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
"Tautology");
observed_overflow_list = _overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
// Do the splice ...
- suffix_tail->set_mark_raw(markOop(cur_overflow_list));
+ suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else { // cur_overflow_list == BUSY
- suffix_tail->set_mark_raw(NULL);
+ suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
}
// ... and try to place spliced list back on overflow_list ...
observed_overflow_list =
@@ -7919,11 +7936,11 @@
// Push the prefix elements on work_q
assert(prefix != NULL, "control point invariant");
- const markOop proto = markOopDesc::prototype();
+ const markWord proto = markWord::prototype();
oop next;
NOT_PRODUCT(ssize_t n = 0;)
for (cur = prefix; cur != NULL; cur = next) {
- next = oop(cur->mark_raw());
+ next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = work_q->push(cur);
@@ -7942,7 +7959,7 @@
NOT_PRODUCT(_num_par_pushes++;)
assert(oopDesc::is_oop(p), "Not an oop");
preserve_mark_if_necessary(p);
- p->set_mark_raw((markOop)_overflow_list);
+ p->set_mark_raw(markWord::from_pointer(_overflow_list));
_overflow_list = p;
}
@@ -7956,9 +7973,9 @@
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
- p->set_mark_raw(markOop(cur_overflow_list));
+ p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else {
- p->set_mark_raw(NULL);
+ p->set_mark_raw(markWord::from_pointer(NULL));
}
observed_overflow_list =
Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
@@ -7980,7 +7997,7 @@
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
-void CMSCollector::preserve_mark_work(oop p, markOop m) {
+void CMSCollector::preserve_mark_work(oop p, markWord m) {
_preserved_oop_stack.push(p);
_preserved_mark_stack.push(m);
assert(m == p->mark_raw(), "Mark word changed");
@@ -7990,15 +8007,15 @@
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
- markOop m = p->mark_raw();
- if (m->must_be_preserved(p)) {
+ markWord m = p->mark_raw();
+ if (m.must_be_preserved(p)) {
preserve_mark_work(p, m);
}
}
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
- markOop m = p->mark_raw();
- if (m->must_be_preserved(p)) {
+ markWord m = p->mark_raw();
+ if (m.must_be_preserved(p)) {
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
// Even though we read the mark word without holding
// the lock, we are assured that it will not change
@@ -8038,9 +8055,9 @@
oop p = _preserved_oop_stack.pop();
assert(oopDesc::is_oop(p), "Should be an oop");
assert(_span.contains(p), "oop should be in _span");
- assert(p->mark_raw() == markOopDesc::prototype(),
+ assert(p->mark_raw() == markWord::prototype(),
"Set when taken from overflow list");
- markOop m = _preserved_mark_stack.pop();
+ markWord m = _preserved_mark_stack.pop();
p->set_mark_raw(m);
}
assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
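The overflow-list hunks above all apply the same conversion idiom: where the old code cast the raw mark to and from an oop pointer, the new code round-trips through markWord's value-type accessors. A condensed, illustrative view of that idiom, using only calls that appear in the hunks themselves:

    // Push: stash the current list head in p's mark word, making p the new
    // head of the intrusive overflow list.
    p->set_mark_raw(markWord::from_pointer(_overflow_list));
    _overflow_list = p;

    // Walk/pop: recover the next link from the mark word, then restore the
    // prototype mark before handing the object out.
    oop next = oop(cur->mark_raw().to_pointer());
    cur->set_mark_raw(markWord::prototype());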
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -541,8 +541,8 @@
// The following array-pair keeps track of mark words
// displaced for accommodating overflow list above.
// This code will likely be revisited under RFE#4922830.
- Stack<oop, mtGC> _preserved_oop_stack;
- Stack<markOop, mtGC> _preserved_mark_stack;
+ Stack<oop, mtGC> _preserved_oop_stack;
+ Stack<markWord, mtGC> _preserved_mark_stack;
// In support of multi-threaded concurrent phases
YieldingFlexibleWorkGang* _conc_workers;
@@ -742,7 +742,7 @@
void preserve_mark_if_necessary(oop p);
void par_preserve_mark_if_necessary(oop p);
- void preserve_mark_work(oop p, markOop m);
+ void preserve_mark_work(oop p, markWord m);
void restore_preserved_marks_if_any();
NOT_PRODUCT(bool no_preserved_marks() const;)
// In support of testing overflow code
@@ -1112,6 +1112,7 @@
double occupancy() const { return ((double)used())/((double)capacity()); }
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
+ size_t used_stable() const;
// over-rides
MemRegion used_region_at_save_marks() const;
@@ -1136,7 +1137,7 @@
// Overrides for parallel promotion.
virtual oop par_promote(int thread_num,
- oop obj, markOop m, size_t word_sz);
+ oop obj, markWord m, size_t word_sz);
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
--- a/src/hotspot/share/gc/cms/freeChunk.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/freeChunk.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -56,14 +56,14 @@
class FreeChunk {
friend class VMStructs;
- // For 64 bit compressed oops, the markOop encodes both the size and the
+ // For 64 bit compressed oops, the markWord encodes both the size and the
// indication that this is a FreeChunk and not an object.
volatile size_t _size;
FreeChunk* _prev;
FreeChunk* _next;
- markOop mark() const volatile { return (markOop)_size; }
- void set_mark(markOop m) { _size = (size_t)m; }
+ markWord mark() const volatile { return markWord((uintptr_t)_size); }
+ void set_mark(markWord m) { _size = (size_t)m.value(); }
public:
NOT_PRODUCT(static const size_t header_size();)
@@ -79,7 +79,7 @@
}
bool is_free() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
+ LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
@@ -100,11 +100,11 @@
debug_only(void* size_addr() const { return (void*)&_size; })
size_t size() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
+ LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
return _size;
}
void set_size(size_t sz) {
- LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
+ LP64_ONLY(if (UseCompressedOops) set_mark(markWord::set_size_and_free(sz)); else )
_size = sz;
}
@@ -126,7 +126,7 @@
#ifdef _LP64
if (UseCompressedOops) {
OrderAccess::storestore();
- set_mark(markOopDesc::prototype());
+ set_mark(markWord::prototype());
}
#endif
assert(!is_free(), "Error");
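The freeChunk.hpp comment above notes that, with 64-bit compressed oops, the chunk's first word doubles as a markWord that encodes both the size and the indication that this is a FreeChunk rather than an object. A brief illustrative recap of how the converted accessors route through that encoding (names are taken from the hunk; the non-compressed-oops fallback paths are omitted):

    // Under UseCompressedOops on _LP64, _size is reinterpreted as a markWord:
    markWord m = markWord((uintptr_t)_size);
    bool free  = m.is_cms_free_chunk();   // free-chunk indication encoded in the mark
    size_t sz  = m.get_size();            // chunk size encoded in the mark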
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/gSpaceCounters.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -59,7 +59,7 @@
}
inline void update_used() {
- _used->set_value(_gen->used());
+ _used->set_value(_gen->used_stable());
}
// special version of update_used() to allow the used value to be
@@ -103,7 +103,7 @@
GenerationUsedHelper(Generation* g) : _gen(g) { }
inline jlong take_sample() {
- return _gen->used();
+ return _gen->used_stable();
}
};
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1078,7 +1078,7 @@
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
- markOop m) {
+ markWord m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -381,7 +381,7 @@
// that must not contain a forwarding pointer (though one might be
// inserted in "obj"s mark word by a parallel thread).
oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
- oop obj, size_t obj_sz, markOop m);
+ oop obj, size_t obj_sz, markWord m);
// in support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -44,9 +44,9 @@
// we need to ensure that it is copied (see comment in
// ParScanClosure::do_oop_work).
Klass* objK = obj->klass();
- markOop m = obj->mark_raw();
+ markWord m = obj->mark_raw();
oop new_obj;
- if (m->is_marked()) { // Contains forwarding pointer.
+ if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
} else {
size_t obj_sz = obj->size_given_klass(objK);
@@ -108,9 +108,9 @@
// overwritten with an overflow next pointer after the object is
// forwarded.
Klass* objK = obj->klass();
- markOop m = obj->mark_raw();
+ markWord m = obj->mark_raw();
oop new_obj;
- if (m->is_marked()) { // Contains forwarding pointer.
+ if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -62,12 +62,12 @@
// Return the next displaced header, incrementing the pointer and
// recycling spool area as necessary.
-markOop PromotionInfo::nextDisplacedHeader() {
+markWord PromotionInfo::nextDisplacedHeader() {
assert(_spoolHead != NULL, "promotionInfo inconsistency");
assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
"Empty spool space: no displaced header can be fetched");
assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
- markOop hdr = _spoolHead->displacedHdr[_firstIndex];
+ markWord hdr = _spoolHead->displacedHdr[_firstIndex];
// Spool forward
if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
// forward to next block, recycling this block into spare spool buffer
@@ -93,15 +93,15 @@
void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
// make a copy of header as it may need to be spooled
- markOop mark = oop(trackOop)->mark_raw();
+ markWord mark = oop(trackOop)->mark_raw();
trackOop->clear_next();
- if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
+ if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
trackOop->setDisplacedMark();
} else {
// we'd like to assert something like the following:
- // assert(mark == markOopDesc::prototype(), "consistency check");
+ // assert(mark == markWord::prototype(), "consistency check");
// ... but the above won't work because the age bits have not (yet) been
// cleared. The remainder of the check would be identical to the
// condition checked in must_be_preserved() above, so we don't really
@@ -123,7 +123,7 @@
// Save the given displaced header, incrementing the pointer and
// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markOop hdr) {
+void PromotionInfo::saveDisplacedHeader(markWord hdr) {
assert(_spoolHead != NULL && _spoolTail != NULL,
"promotionInfo inconsistency");
assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
--- a/src/hotspot/share/gc/cms/promotionInfo.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/cms/promotionInfo.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -93,19 +93,19 @@
protected:
SpoolBlock* nextSpoolBlock;
size_t bufferSize; // number of usable words in this block
- markOop* displacedHdr; // the displaced headers start here
+ markWord* displacedHdr; // the displaced headers start here
// Note about bufferSize: it denotes the number of entries available plus 1;
// legal indices range from 1 through BufferSize - 1. See the verification
// code verify() that counts the number of displaced headers spooled.
size_t computeBufferSize() {
- return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
+ return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markWord);
}
public:
void init() {
bufferSize = computeBufferSize();
- displacedHdr = (markOop*)&displacedHdr;
+ displacedHdr = (markWord*)&displacedHdr;
nextSpoolBlock = NULL;
}
@@ -151,8 +151,8 @@
void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
CompactibleFreeListSpace* space() const { return _space; }
- markOop nextDisplacedHeader(); // get next header & forward spool pointer
- void saveDisplacedHeader(markOop hdr);
+ markWord nextDisplacedHeader(); // get next header & forward spool pointer
+ void saveDisplacedHeader(markWord hdr);
// save header and forward spool
inline size_t refillSize() const;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1082,7 +1082,6 @@
G1BarrierSet::dirty_card_queue_set().abandon_logs();
assert(G1BarrierSet::dirty_card_queue_set().num_completed_buffers() == 0,
"DCQS should be empty");
- redirty_cards_queue_set().verify_empty();
}
void G1CollectedHeap::verify_after_full_collection() {
@@ -1521,7 +1520,6 @@
_collection_set(this, _policy),
_hot_card_cache(NULL),
_rem_set(NULL),
- _redirty_cards_queue_set(),
_cm(NULL),
_cm_thread(NULL),
_cr(NULL),
@@ -1691,9 +1689,6 @@
&bs->dirty_card_queue_buffer_allocator(),
true); // init_free_ids
- // Use same buffer allocator as dirty card qset, to allow merging.
- _redirty_cards_queue_set.initialize(&bs->dirty_card_queue_buffer_allocator());
-
// Create the hot card cache.
_hot_card_cache = new G1HotCardCache(this);
@@ -3028,7 +3023,9 @@
calculate_collection_set(evacuation_info, target_pause_time_ms);
+ G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
G1ParScanThreadStateSet per_thread_states(this,
+ &rdcqs,
workers()->active_workers(),
collection_set()->young_region_length(),
collection_set()->optional_region_length());
@@ -3040,7 +3037,7 @@
if (_collection_set.optional_region_length() != 0) {
evacuate_optional_collection_set(&per_thread_states);
}
- post_evacuate_collection_set(evacuation_info, &per_thread_states);
+ post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);
start_new_collection_set();
@@ -3122,22 +3119,22 @@
return true;
}
-void G1CollectedHeap::remove_self_forwarding_pointers() {
- G1ParRemoveSelfForwardPtrsTask rsfp_task;
+void G1CollectedHeap::remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs) {
+ G1ParRemoveSelfForwardPtrsTask rsfp_task(rdcqs);
workers()->run_task(&rsfp_task);
}
-void G1CollectedHeap::restore_after_evac_failure() {
+void G1CollectedHeap::restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs) {
double remove_self_forwards_start = os::elapsedTime();
- remove_self_forwarding_pointers();
+ remove_self_forwarding_pointers(rdcqs);
SharedRestorePreservedMarksTaskExecutor task_executor(workers());
_preserved_marks_set.restore(&task_executor);
phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
-void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
+void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
}
@@ -3264,15 +3261,14 @@
}
};
-void G1CollectedHeap::redirty_logged_cards() {
+void G1CollectedHeap::redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs) {
double redirty_logged_cards_start = os::elapsedTime();
- G1RedirtyLoggedCardsTask redirty_task(&redirty_cards_queue_set(), this);
+ G1RedirtyLoggedCardsTask redirty_task(rdcqs, this);
workers()->run_task(&redirty_task);
G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
- dcq.merge_bufferlists(&redirty_cards_queue_set());
- redirty_cards_queue_set().verify_empty();
+ dcq.merge_bufferlists(rdcqs);
phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
@@ -3603,8 +3599,6 @@
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
-
- redirty_cards_queue_set().verify_empty();
}
class G1EvacuateRegionsBaseTask : public AbstractGangTask {
@@ -3806,7 +3800,9 @@
_collection_set.abandon_optional_collection_set(per_thread_states);
}
-void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
+ G1RedirtyCardsQueueSet* rdcqs,
+ G1ParScanThreadStateSet* per_thread_states) {
rem_set()->cleanup_after_scan_heap_roots();
// Process any discovered reference objects - we have
@@ -3834,7 +3830,7 @@
_allocator->release_gc_alloc_regions(evacuation_info);
if (evacuation_failed()) {
- restore_after_evac_failure();
+ restore_after_evac_failure(rdcqs);
// Reset the G1EvacuationFailureALot counters and flags
NOT_PRODUCT(reset_evacuation_should_fail();)
@@ -3869,7 +3865,7 @@
purge_code_root_memory();
- redirty_logged_cards();
+ redirty_logged_cards(rdcqs);
free_collection_set(&_collection_set, evacuation_info, per_thread_states->surviving_young_words());
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -762,7 +762,9 @@
public:
void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
- void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
+ void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
+ G1RedirtyCardsQueueSet* rdcqs,
+ G1ParScanThreadStateSet* pss);
void expand_heap_after_young_collection();
// Update object copying statistics.
@@ -774,10 +776,6 @@
// The g1 remembered set of the heap.
G1RemSet* _rem_set;
- // A set of cards that cover the objects for which the Rsets should be updated
- // concurrently after the collection.
- G1RedirtyCardsQueueSet _redirty_cards_queue_set;
-
// After a collection pause, convert the regions in the collection set into free
// regions.
void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
@@ -803,17 +801,17 @@
// Failed evacuations cause some logical from-space objects to have
// forwarding pointers to themselves. Reset them.
- void remove_self_forwarding_pointers();
+ void remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs);
// Restore the objects in the regions in the collection set after an
// evacuation failure.
- void restore_after_evac_failure();
+ void restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs);
PreservedMarksSet _preserved_marks_set;
// Preserve the mark of "obj", if necessary, in preparation for its mark
// word being overwritten with a self-forwarding-pointer.
- void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
+ void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to
@@ -935,11 +933,6 @@
uint num_task_queues() const;
- // A set of cards where updates happened during the GC
- G1RedirtyCardsQueueSet& redirty_cards_queue_set() {
- return _redirty_cards_queue_set;
- }
-
// Create a G1CollectedHeap.
// Must call the initialize method afterwards.
// May not return if something goes wrong.
@@ -1366,7 +1359,8 @@
void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
// Redirty logged cards in the refinement queue.
- void redirty_logged_cards();
+ void redirty_logged_cards(G1RedirtyCardsQueueSet* rdcqs);
+
// Verification
// Deduplicate the string
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -203,10 +203,10 @@
UpdateLogBuffersDeferred _log_buffer_cl;
public:
- RemoveSelfForwardPtrHRClosure(uint worker_id) :
+ RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs, uint worker_id) :
_g1h(G1CollectedHeap::heap()),
_worker_id(worker_id),
- _rdcq(&_g1h->redirty_cards_queue_set()),
+ _rdcq(rdcqs),
_log_buffer_cl(&_rdcq) {
}
@@ -250,13 +250,14 @@
}
};
-G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
+G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs) :
AbstractGangTask("G1 Remove Self-forwarding Pointers"),
_g1h(G1CollectedHeap::heap()),
+ _rdcqs(rdcqs),
_hrclaimer(_g1h->workers()->active_workers()) { }
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
- RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id);
+ RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id);
_g1h->collection_set_iterate_increment_from(&rsfp_cl, &_hrclaimer, worker_id);
}
--- a/src/hotspot/share/gc/g1/g1EvacFailure.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -31,16 +31,18 @@
#include "utilities/globalDefinitions.hpp"
class G1CollectedHeap;
+class G1RedirtyCardsQueueSet;
// Task to fixup self-forwarding pointers
// installed as a result of an evacuation failure.
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
+ G1RedirtyCardsQueueSet* _rdcqs;
HeapRegionClaimer _hrclaimer;
public:
- G1ParRemoveSelfForwardPtrsTask();
+ G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs);
void work(uint worker_id);
};
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -282,7 +282,7 @@
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
- // word is set to markOop::marked_value - effectively removing
+ // word is set to markWord::marked_value - effectively removing
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -116,11 +116,11 @@
} else {
// Make sure object has the correct mark-word set or that it will be
// fixed when restoring the preserved marks.
- assert(object->mark_raw() == markOopDesc::prototype_for_object(object) || // Correct mark
- object->mark_raw()->must_be_preserved(object) || // Will be restored by PreservedMarksSet
+ assert(object->mark_raw() == markWord::prototype_for_object(object) || // Correct mark
+ object->mark_raw().must_be_preserved(object) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && object->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
- p2i(object), p2i(object->mark_raw()), p2i(markOopDesc::prototype_for_object(object)));
+ p2i(object), object->mark_raw().value(), markWord::prototype_for_object(object).value());
}
assert(object->forwardee() == NULL, "should be forwarded to NULL");
}
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -50,8 +50,8 @@
}
// Marked by us, preserve if needed.
- markOop mark = obj->mark_raw();
- if (mark->must_be_preserved(obj) &&
+ markWord mark = obj->mark_raw();
+ if (mark.must_be_preserved(obj) &&
!G1ArchiveAllocator::is_open_archive_object(obj)) {
preserved_stack()->push(obj, mark);
}
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -77,11 +77,11 @@
oop forwardee = obj->forwardee();
if (forwardee == NULL) {
// Not forwarded, return current reference.
- assert(obj->mark_raw() == markOopDesc::prototype_for_object(obj) || // Correct mark
- obj->mark_raw()->must_be_preserved(obj) || // Will be restored by PreservedMarksSet
+ assert(obj->mark_raw() == markWord::prototype_for_object(obj) || // Correct mark
+ obj->mark_raw().must_be_preserved(obj) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && obj->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
- p2i(obj), p2i(obj->mark_raw()), p2i(markOopDesc::prototype_for_object(obj)));
+ p2i(obj), obj->mark_raw().value(), markWord::prototype_for_object(obj).value());
return;
}
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -230,9 +230,9 @@
const G1HeapRegionAttr state = _g1h->region_attr(obj);
if (state.is_in_cset()) {
oop forwardee;
- markOop m = obj->mark_raw();
- if (m->is_marked()) {
- forwardee = (oop) m->decode_pointer();
+ markWord m = obj->mark_raw();
+ if (m.is_marked()) {
+ forwardee = (oop) m.decode_pointer();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -38,12 +38,13 @@
#include "runtime/prefetch.inline.hpp"
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
+ G1RedirtyCardsQueueSet* rdcqs,
uint worker_id,
size_t young_cset_length,
size_t optional_cset_length)
: _g1h(g1h),
_refs(g1h->task_queue(worker_id)),
- _rdcq(&g1h->redirty_cards_queue_set()),
+ _rdcq(rdcqs),
_ct(g1h->card_table()),
_closures(NULL),
_plab_allocator(NULL),
@@ -196,10 +197,10 @@
}
}
-G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
+G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
if (region_attr.is_young()) {
- age = !m->has_displaced_mark_helper() ? m->age()
- : m->displaced_mark_helper()->age();
+ age = !m.has_displaced_mark_helper() ? m.age()
+ : m.displaced_mark_helper().age();
if (age < _tenuring_threshold) {
return region_attr;
}
@@ -223,7 +224,7 @@
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
oop const old,
- markOop const old_mark) {
+ markWord const old_mark) {
const size_t word_sz = old->size();
HeapRegion* const from_region = _g1h->heap_region_containing(old);
// +1 to make the -1 indexes valid...
@@ -281,18 +282,18 @@
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
if (dest_attr.is_young()) {
- if (age < markOopDesc::max_age) {
+ if (age < markWord::max_age) {
age++;
}
- if (old_mark->has_displaced_mark_helper()) {
+ if (old_mark.has_displaced_mark_helper()) {
// In this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
obj->set_mark_raw(old_mark);
- markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
- old_mark->set_displaced_mark_helper(new_mark);
+ markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
+ old_mark.set_displaced_mark_helper(new_mark);
} else {
- obj->set_mark_raw(old_mark->set_age(age));
+ obj->set_mark_raw(old_mark.set_age(age));
}
_age_table.add(age, word_sz);
} else {
@@ -336,7 +337,7 @@
assert(worker_id < _n_workers, "out of bounds access");
if (_states[worker_id] == NULL) {
_states[worker_id] =
- new G1ParScanThreadState(_g1h, worker_id, _young_cset_length, _optional_cset_length);
+ new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
}
return _states[worker_id];
}
@@ -376,7 +377,7 @@
}
}
-oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
+oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
@@ -407,10 +408,12 @@
}
}
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
+ G1RedirtyCardsQueueSet* rdcqs,
uint n_workers,
size_t young_cset_length,
size_t optional_cset_length) :
_g1h(g1h),
+ _rdcqs(rdcqs),
_states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
_surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
_young_cset_length(young_cset_length),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -97,6 +97,7 @@
public:
G1ParScanThreadState(G1CollectedHeap* g1h,
+ G1RedirtyCardsQueueSet* rdcqs,
uint worker_id,
size_t young_cset_length,
size_t optional_cset_length);
@@ -203,7 +204,7 @@
size_t word_sz,
bool previous_plab_refill_failed);
- inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);
+ inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
void report_promotion_event(G1HeapRegionAttr const dest_attr,
oop const old, size_t word_sz, uint age,
@@ -214,7 +215,7 @@
inline void trim_queue_to_threshold(uint threshold);
public:
- oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);
+ oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);
void trim_queue();
void trim_queue_partially();
@@ -225,7 +226,7 @@
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
// An attempt to evacuate "obj" has failed; take necessary steps.
- oop handle_evacuation_failure_par(oop obj, markOop m);
+ oop handle_evacuation_failure_par(oop obj, markWord m);
template <typename T>
inline void remember_root_into_optional_region(T* p);
@@ -237,6 +238,7 @@
class G1ParScanThreadStateSet : public StackObj {
G1CollectedHeap* _g1h;
+ G1RedirtyCardsQueueSet* _rdcqs;
G1ParScanThreadState** _states;
size_t* _surviving_young_words_total;
size_t _young_cset_length;
@@ -246,6 +248,7 @@
public:
G1ParScanThreadStateSet(G1CollectedHeap* g1h,
+ G1RedirtyCardsQueueSet* rdcqs,
uint n_workers,
size_t young_cset_length,
size_t optional_cset_length);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -53,9 +53,9 @@
return;
}
- markOop m = obj->mark_raw();
- if (m->is_marked()) {
- obj = (oop) m->decode_pointer();
+ markWord m = obj->mark_raw();
+ if (m.is_marked()) {
+ obj = (oop) m.decode_pointer();
} else {
obj = copy_to_survivor_space(region_attr, obj, m);
}
--- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -99,13 +99,15 @@
// G1RedirtyCardsQueueSet
-G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet() :
+G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet(BufferNode::Allocator* allocator) :
PtrQueueSet(),
_list(),
_entry_count(0),
_tail(NULL)
DEBUG_ONLY(COMMA _collecting(true))
-{}
+{
+ initialize(allocator);
+}
G1RedirtyCardsQueueSet::~G1RedirtyCardsQueueSet() {
verify_empty();
--- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -110,11 +110,9 @@
void update_tail(BufferNode* node);
public:
- G1RedirtyCardsQueueSet();
+ G1RedirtyCardsQueueSet(BufferNode::Allocator* allocator);
~G1RedirtyCardsQueueSet();
- using PtrQueueSet::initialize;
-
void verify_empty() const NOT_DEBUG_RETURN;
// Collect buffers. These functions are thread-safe.
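Taken together, the G1 hunks above replace the heap-wide _redirty_cards_queue_set with a queue set constructed per evacuation pause and threaded through the per-thread scan states and the post-evacuation phases. A hedged sketch of that wiring, using only names that appear in the hunks (argument lists abbreviated, so this is not compilable as shown):

    // Stack-allocated for the duration of one pause; shares the dirty card
    // queue set's buffer allocator so buffers can be merged back afterwards.
    G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
    G1ParScanThreadStateSet per_thread_states(this, &rdcqs, /* worker and cset lengths */);

    // ... evacuation runs, workers log cards into rdcqs ...

    // Post-evacuation: self-forward removal and card redirtying consume the
    // same per-pause set, then its buffers are merged into the global DCQS.
    post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);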
--- a/src/hotspot/share/gc/parallel/gcTaskManager.cpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1074 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/gcTaskThread.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/workerManager.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-//
-// GCTask
-//
-
-const char* GCTask::Kind::to_string(kind value) {
- const char* result = "unknown GCTask kind";
- switch (value) {
- default:
- result = "unknown GCTask kind";
- break;
- case unknown_task:
- result = "unknown task";
- break;
- case ordinary_task:
- result = "ordinary task";
- break;
- case wait_for_barrier_task:
- result = "wait for barrier task";
- break;
- case noop_task:
- result = "noop task";
- break;
- case idle_task:
- result = "idle task";
- break;
- }
- return result;
-};
-
-GCTask::GCTask() {
- initialize(Kind::ordinary_task, GCId::current());
-}
-
-GCTask::GCTask(Kind::kind kind) {
- initialize(kind, GCId::current());
-}
-
-GCTask::GCTask(Kind::kind kind, uint gc_id) {
- initialize(kind, gc_id);
-}
-
-void GCTask::initialize(Kind::kind kind, uint gc_id) {
- _kind = kind;
- _affinity = GCTaskManager::sentinel_worker();
- _older = NULL;
- _newer = NULL;
- _gc_id = gc_id;
-}
-
-void GCTask::destruct() {
- assert(older() == NULL, "shouldn't have an older task");
- assert(newer() == NULL, "shouldn't have a newer task");
- // Nothing to do.
-}
-
-NOT_PRODUCT(
-void GCTask::print(const char* message) const {
- tty->print(INTPTR_FORMAT " <- " INTPTR_FORMAT "(%u) -> " INTPTR_FORMAT,
- p2i(newer()), p2i(this), affinity(), p2i(older()));
-}
-)
-
-//
-// GCTaskQueue
-//
-
-GCTaskQueue* GCTaskQueue::create() {
- GCTaskQueue* result = new GCTaskQueue(false);
- if (TraceGCTaskQueue) {
- tty->print_cr("GCTaskQueue::create()"
- " returns " INTPTR_FORMAT, p2i(result));
- }
- return result;
-}
-
-GCTaskQueue* GCTaskQueue::create_on_c_heap() {
- GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true);
- if (TraceGCTaskQueue) {
- tty->print_cr("GCTaskQueue::create_on_c_heap()"
- " returns " INTPTR_FORMAT,
- p2i(result));
- }
- return result;
-}
-
-GCTaskQueue::GCTaskQueue(bool on_c_heap) :
- _is_c_heap_obj(on_c_heap) {
- initialize();
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::GCTaskQueue() constructor",
- p2i(this));
- }
-}
-
-void GCTaskQueue::destruct() {
- // Nothing to do.
-}
-
-void GCTaskQueue::destroy(GCTaskQueue* that) {
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::destroy()"
- " is_c_heap_obj: %s",
- p2i(that),
- that->is_c_heap_obj() ? "true" : "false");
- }
- // That instance may have been allocated as a CHeapObj,
- // in which case we have to free it explicitly.
- if (that != NULL) {
- that->destruct();
- assert(that->is_empty(), "should be empty");
- if (that->is_c_heap_obj()) {
- FreeHeap(that);
- }
- }
-}
-
-void GCTaskQueue::initialize() {
- set_insert_end(NULL);
- set_remove_end(NULL);
- set_length(0);
-}
-
-// Enqueue one task.
-void GCTaskQueue::enqueue(GCTask* task) {
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::enqueue(task: "
- INTPTR_FORMAT ")",
- p2i(this), p2i(task));
- print("before:");
- }
- assert(task != NULL, "shouldn't have null task");
- assert(task->older() == NULL, "shouldn't be on queue");
- assert(task->newer() == NULL, "shouldn't be on queue");
- task->set_newer(NULL);
- task->set_older(insert_end());
- if (is_empty()) {
- set_remove_end(task);
- } else {
- insert_end()->set_newer(task);
- }
- set_insert_end(task);
- increment_length();
- verify_length();
- if (TraceGCTaskQueue) {
- print("after:");
- }
-}
-
-// Enqueue a whole list of tasks. Empties the argument list.
-void GCTaskQueue::enqueue(GCTaskQueue* list) {
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::enqueue(list: "
- INTPTR_FORMAT ")",
- p2i(this), p2i(list));
- print("before:");
- list->print("list:");
- }
- if (list->is_empty()) {
- // Enqueueing the empty list: nothing to do.
- return;
- }
- uint list_length = list->length();
- if (is_empty()) {
- // Enqueueing to empty list: just acquire elements.
- set_insert_end(list->insert_end());
- set_remove_end(list->remove_end());
- set_length(list_length);
- } else {
- // Prepend argument list to our queue.
- list->remove_end()->set_older(insert_end());
- insert_end()->set_newer(list->remove_end());
- set_insert_end(list->insert_end());
- set_length(length() + list_length);
- // empty the argument list.
- }
- list->initialize();
- if (TraceGCTaskQueue) {
- print("after:");
- list->print("list:");
- }
- verify_length();
-}
-
-// Dequeue one task.
-GCTask* GCTaskQueue::dequeue() {
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::dequeue()", p2i(this));
- print("before:");
- }
- assert(!is_empty(), "shouldn't dequeue from empty list");
- GCTask* result = remove();
- assert(result != NULL, "shouldn't have NULL task");
- if (TraceGCTaskQueue) {
- tty->print_cr(" return: " INTPTR_FORMAT, p2i(result));
- print("after:");
- }
- return result;
-}
-
-// Dequeue one task, preferring one with affinity.
-GCTask* GCTaskQueue::dequeue(uint affinity) {
- if (TraceGCTaskQueue) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " GCTaskQueue::dequeue(%u)", p2i(this), affinity);
- print("before:");
- }
- assert(!is_empty(), "shouldn't dequeue from empty list");
- // Look down to the next barrier for a task with this affinity.
- GCTask* result = NULL;
- for (GCTask* element = remove_end();
- element != NULL;
- element = element->newer()) {
- if (element->is_barrier_task()) {
- // Don't consider barrier tasks, nor past them.
- result = NULL;
- break;
- }
- if (element->affinity() == affinity) {
- result = remove(element);
- break;
- }
- }
- // If we didn't find anything with affinity, just take the next task.
- if (result == NULL) {
- result = remove();
- }
- if (TraceGCTaskQueue) {
- tty->print_cr(" return: " INTPTR_FORMAT, p2i(result));
- print("after:");
- }
- return result;
-}
-
-GCTask* GCTaskQueue::remove() {
- // Dequeue from remove end.
- GCTask* result = remove_end();
- assert(result != NULL, "shouldn't have null task");
- assert(result->older() == NULL, "not the remove_end");
- set_remove_end(result->newer());
- if (remove_end() == NULL) {
- assert(insert_end() == result, "not a singleton");
- set_insert_end(NULL);
- } else {
- remove_end()->set_older(NULL);
- }
- result->set_newer(NULL);
- decrement_length();
- assert(result->newer() == NULL, "shouldn't be on queue");
- assert(result->older() == NULL, "shouldn't be on queue");
- verify_length();
- return result;
-}
-
-GCTask* GCTaskQueue::remove(GCTask* task) {
- // This is slightly more work, and has slightly fewer asserts
- // than removing from the remove end.
- assert(task != NULL, "shouldn't have null task");
- GCTask* result = task;
- if (result->newer() != NULL) {
- result->newer()->set_older(result->older());
- } else {
- assert(insert_end() == result, "not youngest");
- set_insert_end(result->older());
- }
- if (result->older() != NULL) {
- result->older()->set_newer(result->newer());
- } else {
- assert(remove_end() == result, "not oldest");
- set_remove_end(result->newer());
- }
- result->set_newer(NULL);
- result->set_older(NULL);
- decrement_length();
- verify_length();
- return result;
-}
-
-NOT_PRODUCT(
-// Count the elements in the queue and verify the length against
-// that count.
-void GCTaskQueue::verify_length() const {
- uint count = 0;
- for (GCTask* element = insert_end();
- element != NULL;
- element = element->older()) {
-
- count++;
- }
- assert(count == length(), "Length does not match queue");
-}
-
-void GCTaskQueue::print(const char* message) const {
- tty->print_cr("[" INTPTR_FORMAT "] GCTaskQueue:"
- " insert_end: " INTPTR_FORMAT
- " remove_end: " INTPTR_FORMAT
- " length: %d"
- " %s",
- p2i(this), p2i(insert_end()), p2i(remove_end()), length(), message);
- uint count = 0;
- for (GCTask* element = insert_end();
- element != NULL;
- element = element->older()) {
- element->print(" ");
- count++;
- tty->cr();
- }
- tty->print("Total tasks: %d", count);
-}
-)
-
-//
-// SynchronizedGCTaskQueue
-//
-
-SynchronizedGCTaskQueue::SynchronizedGCTaskQueue(GCTaskQueue* queue_arg,
- Monitor * lock_arg) :
- _unsynchronized_queue(queue_arg),
- _lock(lock_arg) {
- assert(unsynchronized_queue() != NULL, "null queue");
- assert(lock() != NULL, "null lock");
-}
-
-SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {
- // Nothing to do.
-}
-
-//
-// GCTaskManager
-//
-GCTaskManager::GCTaskManager(uint workers) :
- _workers(workers),
- _created_workers(0),
- _active_workers(0),
- _idle_workers(0) {
- initialize();
-}
-
-GCTaskThread* GCTaskManager::install_worker(uint t) {
- GCTaskThread* new_worker = GCTaskThread::create(this, t, _processor_assignment[t]);
- set_thread(t, new_worker);
- return new_worker;
-}
-
-void GCTaskManager::add_workers(bool initializing) {
- os::ThreadType worker_type = os::pgc_thread;
- uint previous_created_workers = _created_workers;
-
- _created_workers = WorkerManager::add_workers(this,
- _active_workers,
- _workers,
- _created_workers,
- worker_type,
- initializing);
- _active_workers = MIN2(_created_workers, _active_workers);
-
- WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
-}
-
-const char* GCTaskManager::group_name() {
- return "ParGC Thread";
-}
-
-void GCTaskManager::initialize() {
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
- }
- assert(workers() != 0, "no workers");
- _monitor = new Monitor(Mutex::barrier, // rank
- "GCTaskManager monitor", // name
- Mutex::_allow_vm_block_flag, // allow_vm_block
- Monitor::_safepoint_check_never);
- // The queue for the GCTaskManager must be a CHeapObj.
- GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
- _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
- _noop_task = NoopGCTask::create_on_c_heap();
- _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
- {
- // Set up worker threads.
- // Distribute the workers among the available processors,
- // unless we were told not to, or if the os doesn't want to.
- _processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
- if (!BindGCTaskThreadsToCPUs ||
- !os::distribute_processes(workers(), _processor_assignment)) {
- for (uint a = 0; a < workers(); a += 1) {
- _processor_assignment[a] = sentinel_worker();
- }
- }
-
- _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
- _active_workers = ParallelGCThreads;
- if (UseDynamicNumberOfGCThreads && !FLAG_IS_CMDLINE(ParallelGCThreads)) {
- _active_workers = 1U;
- }
-
- Log(gc, task, thread) log;
- if (log.is_trace()) {
- LogStream ls(log.trace());
- ls.print("GCTaskManager::initialize: distribution:");
- for (uint t = 0; t < workers(); t += 1) {
- ls.print(" %u", _processor_assignment[t]);
- }
- ls.cr();
- }
- }
- reset_busy_workers();
- set_unblocked();
- for (uint w = 0; w < workers(); w += 1) {
- set_resource_flag(w, false);
- }
- reset_delivered_tasks();
- reset_completed_tasks();
- reset_barriers();
- reset_emptied_queue();
-
- add_workers(true);
-}
-
-GCTaskManager::~GCTaskManager() {
- assert(busy_workers() == 0, "still have busy workers");
- assert(queue()->is_empty(), "still have queued work");
- NoopGCTask::destroy(_noop_task);
- _noop_task = NULL;
- if (_thread != NULL) {
- for (uint i = 0; i < created_workers(); i += 1) {
- GCTaskThread::destroy(thread(i));
- set_thread(i, NULL);
- }
- FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
- _thread = NULL;
- }
- if (_processor_assignment != NULL) {
- FREE_C_HEAP_ARRAY(uint, _processor_assignment);
- _processor_assignment = NULL;
- }
- if (_resource_flag != NULL) {
- FREE_C_HEAP_ARRAY(bool, _resource_flag);
- _resource_flag = NULL;
- }
- if (queue() != NULL) {
- GCTaskQueue* unsynchronized_queue = queue()->unsynchronized_queue();
- GCTaskQueue::destroy(unsynchronized_queue);
- SynchronizedGCTaskQueue::destroy(queue());
- _queue = NULL;
- }
- if (monitor() != NULL) {
- delete monitor();
- _monitor = NULL;
- }
-}
-
-void GCTaskManager::set_active_gang() {
- _active_workers =
- WorkerPolicy::calc_active_workers(workers(),
- active_workers(),
- Threads::number_of_non_daemon_threads());
-
- assert(!all_workers_active() || active_workers() == ParallelGCThreads,
- "all_workers_active() is incorrect: "
- "active %d ParallelGCThreads %u", active_workers(),
- ParallelGCThreads);
- _active_workers = MIN2(_active_workers, _workers);
- // "add_workers" does not guarantee any additional workers
- add_workers(false);
- log_trace(gc, task)("GCTaskManager::set_active_gang(): "
- "all_workers_active() %d workers %d "
- "active %d ParallelGCThreads %u",
- all_workers_active(), workers(), active_workers(),
- ParallelGCThreads);
-}
-
-// Create IdleGCTasks for inactive workers.
-// Creates tasks in a ResourceArea and assumes
-// an appropriate ResourceMark.
-void GCTaskManager::task_idle_workers() {
- {
- int more_inactive_workers = 0;
- {
- // Stop any idle tasks from exiting their IdleGCTask's
- // and get the count for additional IdleGCTask's under
- // the GCTaskManager's monitor so that the "more_inactive_workers"
- // count is correct.
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- _wait_helper.set_should_wait(true);
- // active_workers are a number being requested. idle_workers
- // are the number currently idle. If all the workers are being
- // requested to be active but some are already idle, reduce
- // the number of active_workers to be consistent with the
- // number of idle_workers. The idle_workers are stuck in
- // idle tasks and will no longer be release (since a new GC
- // is starting). Try later to release enough idle_workers
- // to allow the desired number of active_workers.
- more_inactive_workers =
- created_workers() - active_workers() - idle_workers();
- if (more_inactive_workers < 0) {
- int reduced_active_workers = active_workers() + more_inactive_workers;
- update_active_workers(reduced_active_workers);
- more_inactive_workers = 0;
- }
- log_trace(gc, task)("JT: %d workers %d active %d idle %d more %d",
- Threads::number_of_non_daemon_threads(),
- created_workers(),
- active_workers(),
- idle_workers(),
- more_inactive_workers);
- }
- GCTaskQueue* q = GCTaskQueue::create();
- for(uint i = 0; i < (uint) more_inactive_workers; i++) {
- q->enqueue(IdleGCTask::create_on_c_heap());
- increment_idle_workers();
- }
- assert(created_workers() == active_workers() + idle_workers(),
- "total workers should equal active + inactive");
- add_list(q);
- // GCTaskQueue* q was created in a ResourceArea so a
- // destroy() call is not needed.
- }
-}
-
-void GCTaskManager::release_idle_workers() {
- {
- MutexLocker ml(monitor(),
- Mutex::_no_safepoint_check_flag);
- _wait_helper.set_should_wait(false);
- monitor()->notify_all();
- // Release monitor
- }
-}
-
-void GCTaskManager::print_task_time_stamps() {
- if (!log_is_enabled(Debug, gc, task, time)) {
- return;
- }
- uint num_thr = created_workers();
- for(uint i=0; i < num_thr; i++) {
- GCTaskThread* t = thread(i);
- t->print_task_time_stamps();
- }
-}
-
-void GCTaskManager::print_threads_on(outputStream* st) {
- uint num_thr = created_workers();
- for (uint i = 0; i < num_thr; i++) {
- thread(i)->print_on(st);
- st->cr();
- }
-}
-
-void GCTaskManager::threads_do(ThreadClosure* tc) {
- assert(tc != NULL, "Null ThreadClosure");
- uint num_thr = created_workers();
- for (uint i = 0; i < num_thr; i++) {
- tc->do_thread(thread(i));
- }
-}
-
-GCTaskThread* GCTaskManager::thread(uint which) {
- assert(which < created_workers(), "index out of bounds");
- assert(_thread[which] != NULL, "shouldn't have null thread");
- return _thread[which];
-}
-
-void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
- // "_created_workers" may not have been updated yet so use workers()
- assert(which < workers(), "index out of bounds");
- assert(value != NULL, "shouldn't have null thread");
- _thread[which] = value;
-}
-
-void GCTaskManager::add_task(GCTask* task) {
- assert(task != NULL, "shouldn't have null task");
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::add_task(" INTPTR_FORMAT " [%s])",
- p2i(task), GCTask::Kind::to_string(task->kind()));
- }
- queue()->enqueue(task);
- // Notify with the lock held to avoid missed notifies.
- if (TraceGCTaskManager) {
- tty->print_cr(" GCTaskManager::add_task (%s)->notify_all",
- monitor()->name());
- }
- (void) monitor()->notify_all();
- // Release monitor().
-}
-
-void GCTaskManager::add_list(GCTaskQueue* list) {
- assert(list != NULL, "shouldn't have null task");
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::add_list(%u)", list->length());
- }
- queue()->enqueue(list);
- // Notify with the lock held to avoid missed notifies.
- if (TraceGCTaskManager) {
- tty->print_cr(" GCTaskManager::add_list (%s)->notify_all",
- monitor()->name());
- }
- (void) monitor()->notify_all();
- // Release monitor().
-}
-
-// GC workers wait in get_task() for new work to be added
-// to the GCTaskManager's queue. When new work is added,
-// a notify is sent to the waiting GC workers which then
-// compete to get tasks. If a GC worker wakes up and there
-// is no work on the queue, it is given a noop_task to execute
-// and then loops to find more work.
-
-GCTask* GCTaskManager::get_task(uint which) {
- GCTask* result = NULL;
- // Grab the queue lock.
- MonitorLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- // Wait while the queue is block or
- // there is nothing to do, except maybe release resources.
- while (is_blocked() ||
- (queue()->is_empty() && !should_release_resources(which))) {
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::get_task(%u)"
- " blocked: %s"
- " empty: %s"
- " release: %s",
- which,
- is_blocked() ? "true" : "false",
- queue()->is_empty() ? "true" : "false",
- should_release_resources(which) ? "true" : "false");
- tty->print_cr(" => (%s)->wait()",
- monitor()->name());
- }
- ml.wait(0);
- }
- // We've reacquired the queue lock here.
- // Figure out which condition caused us to exit the loop above.
- if (!queue()->is_empty()) {
- if (UseGCTaskAffinity) {
- result = queue()->dequeue(which);
- } else {
- result = queue()->dequeue();
- }
- if (result->is_barrier_task()) {
- assert(which != sentinel_worker(),
- "blocker shouldn't be bogus");
- set_blocking_worker(which);
- }
- } else {
- // The queue is empty, but we were woken up.
- // Just hand back a Noop task,
- // in case someone wanted us to release resources, or whatever.
- result = noop_task();
- }
- assert(result != NULL, "shouldn't have null task");
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::get_task(%u) => " INTPTR_FORMAT " [%s]",
- which, p2i(result), GCTask::Kind::to_string(result->kind()));
- tty->print_cr(" %s", result->name());
- }
- if (!result->is_idle_task()) {
- increment_busy_workers();
- increment_delivered_tasks();
- }
- return result;
- // Release monitor().
-}
-
-void GCTaskManager::note_completion(uint which) {
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- if (TraceGCTaskManager) {
- tty->print_cr("GCTaskManager::note_completion(%u)", which);
- }
- // If we are blocked, check if the completing thread is the blocker.
- if (blocking_worker() == which) {
- assert(blocking_worker() != sentinel_worker(),
- "blocker shouldn't be bogus");
- increment_barriers();
- set_unblocked();
- }
- increment_completed_tasks();
- uint active = decrement_busy_workers();
- if ((active == 0) && (queue()->is_empty())) {
- increment_emptied_queue();
- if (TraceGCTaskManager) {
- tty->print_cr(" GCTaskManager::note_completion(%u) done", which);
- }
- }
- if (TraceGCTaskManager) {
- tty->print_cr(" GCTaskManager::note_completion(%u) (%s)->notify_all",
- which, monitor()->name());
- tty->print_cr(" "
- " blocked: %s"
- " empty: %s"
- " release: %s",
- is_blocked() ? "true" : "false",
- queue()->is_empty() ? "true" : "false",
- should_release_resources(which) ? "true" : "false");
- tty->print_cr(" "
- " delivered: %u"
- " completed: %u"
- " barriers: %u"
- " emptied: %u",
- delivered_tasks(),
- completed_tasks(),
- barriers(),
- emptied_queue());
- }
- // Tell everyone that a task has completed.
- (void) monitor()->notify_all();
- // Release monitor().
-}
-
-uint GCTaskManager::increment_busy_workers() {
- assert(queue()->own_lock(), "don't own the lock");
- _busy_workers += 1;
- return _busy_workers;
-}
-
-uint GCTaskManager::decrement_busy_workers() {
- assert(queue()->own_lock(), "don't own the lock");
- assert(_busy_workers > 0, "About to make a mistake");
- _busy_workers -= 1;
- return _busy_workers;
-}
-
-void GCTaskManager::release_all_resources() {
- // If you want this to be done atomically, do it in a WaitForBarrierGCTask.
- for (uint i = 0; i < created_workers(); i += 1) {
- set_resource_flag(i, true);
- }
-}
-
-bool GCTaskManager::should_release_resources(uint which) {
- // This can be done without a lock because each thread reads one element.
- return resource_flag(which);
-}
-
-void GCTaskManager::note_release(uint which) {
- // This can be done without a lock because each thread writes one element.
- set_resource_flag(which, false);
-}
-
-// "list" contains tasks that are ready to execute. Those
-// tasks are added to the GCTaskManager's queue of tasks and
-// then the GC workers are notified that there is new work to
-// do.
-//
-// Typically different types of tasks can be added to the "list".
-// For example in PSScavenge OldToYoungRootsTask, SerialOldToYoungRootsTask,
-// ScavengeRootsTask, and StealTask tasks are all added to the list
-// and then the GC workers are notified of new work. The tasks are
-// handed out in the order in which they are added to the list
-// (although execution is not necessarily in that order). As long
-// as any tasks are running the GCTaskManager will wait for execution
-// to complete. GC workers that execute a stealing task remain in
-// the stealing task until all stealing tasks have completed. The load
-// balancing afforded by the stealing tasks work best if the stealing
-// tasks are added last to the list.
-
-void GCTaskManager::execute_and_wait(GCTaskQueue* list) {
- WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
- list->enqueue(fin);
- // The barrier task will be read by one of the GC
- // workers once it is added to the list of tasks.
-  // Be sure that it is globally visible before the
- // GC worker reads it (which is after the task is added
- // to the list of tasks below).
- OrderAccess::storestore();
- add_list(list);
- fin->wait_for(true /* reset */);
- // We have to release the barrier tasks!
- WaitForBarrierGCTask::destroy(fin);
-}
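
The OrderAccess::storestore() above addresses a publication problem: the WaitForBarrierGCTask's fields must be written before the task becomes reachable through the shared queue, otherwise a worker could dequeue a half-initialized task. A stand-alone sketch of the same publish-then-read ordering using std::atomic (illustrative only; the type and function names below are not from this patch):

#include <atomic>

struct BarrierTask { bool initialized = false; };

std::atomic<BarrierTask*> shared_slot{nullptr};

void publish(BarrierTask* t) {
  t->initialized = true;                            // initialize the fields first
  shared_slot.store(t, std::memory_order_release);  // then make the pointer visible
}

BarrierTask* take() {
  return shared_slot.load(std::memory_order_acquire);  // pairs with the release store
}
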
-
-bool GCTaskManager::resource_flag(uint which) {
- assert(which < workers(), "index out of bounds");
- return _resource_flag[which];
-}
-
-void GCTaskManager::set_resource_flag(uint which, bool value) {
- assert(which < workers(), "index out of bounds");
- _resource_flag[which] = value;
-}
-
-//
-// NoopGCTask
-//
-
-NoopGCTask* NoopGCTask::create_on_c_heap() {
- NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask();
- return result;
-}
-
-void NoopGCTask::destroy(NoopGCTask* that) {
- if (that != NULL) {
- that->destruct();
- FreeHeap(that);
- }
-}
-
-// This task should never be performing GC work that requires
-// a valid GC id.
-NoopGCTask::NoopGCTask() : GCTask(GCTask::Kind::noop_task, GCId::undefined()) { }
-
-void NoopGCTask::destruct() {
-  // This has to know its superclass structure, just like the constructor.
- this->GCTask::destruct();
- // Nothing else to do.
-}
-
-//
-// IdleGCTask
-//
-
-IdleGCTask* IdleGCTask::create() {
- IdleGCTask* result = new IdleGCTask(false);
- assert(UseDynamicNumberOfGCThreads,
- "Should only be used with dynamic GC thread");
- return result;
-}
-
-IdleGCTask* IdleGCTask::create_on_c_heap() {
- IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true);
- assert(UseDynamicNumberOfGCThreads,
- "Should only be used with dynamic GC thread");
- return result;
-}
-
-void IdleGCTask::do_it(GCTaskManager* manager, uint which) {
- WaitHelper* wait_helper = manager->wait_helper();
- log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask:::do_it() should_wait: %s",
- p2i(this), wait_helper->should_wait() ? "true" : "false");
-
- MonitorLocker ml(manager->monitor(), Mutex::_no_safepoint_check_flag);
- log_trace(gc, task)("--- idle %d", which);
- // Increment has to be done when the idle tasks are created.
- // manager->increment_idle_workers();
- ml.notify_all();
- while (wait_helper->should_wait()) {
- log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask::do_it() [" INTPTR_FORMAT "] (%s)->wait()",
- p2i(this), p2i(manager->monitor()), manager->monitor()->name());
- ml.wait(0);
- }
- manager->decrement_idle_workers();
-
- log_trace(gc, task)("--- release %d", which);
- log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask::do_it() returns should_wait: %s",
- p2i(this), wait_helper->should_wait() ? "true" : "false");
- // Release monitor().
-}
-
-void IdleGCTask::destroy(IdleGCTask* that) {
- if (that != NULL) {
- that->destruct();
- if (that->is_c_heap_obj()) {
- FreeHeap(that);
- }
- }
-}
-
-void IdleGCTask::destruct() {
-  // This has to know its superclass structure, just like the constructor.
- this->GCTask::destruct();
- // Nothing else to do.
-}
-
-//
-// WaitForBarrierGCTask
-//
-WaitForBarrierGCTask* WaitForBarrierGCTask::create() {
- WaitForBarrierGCTask* result = new WaitForBarrierGCTask();
- return result;
-}
-
-WaitForBarrierGCTask::WaitForBarrierGCTask() : GCTask(GCTask::Kind::wait_for_barrier_task) { }
-
-void WaitForBarrierGCTask::destroy(WaitForBarrierGCTask* that) {
- if (that != NULL) {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "] WaitForBarrierGCTask::destroy()", p2i(that));
- }
- that->destruct();
- }
-}
-
-void WaitForBarrierGCTask::destruct() {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "] WaitForBarrierGCTask::destruct()", p2i(this));
- }
- this->GCTask::destruct();
- // Clean up that should be in the destructor,
- // except that ResourceMarks don't call destructors.
- _wait_helper.release_monitor();
-}
-
-void WaitForBarrierGCTask::do_it_internal(GCTaskManager* manager, uint which) {
- // Wait for this to be the only busy worker.
- assert(manager->monitor()->owned_by_self(), "don't own the lock");
- assert(manager->is_blocked(), "manager isn't blocked");
- while (manager->busy_workers() > 1) {
- if (TraceGCTaskManager) {
- tty->print_cr("WaitForBarrierGCTask::do_it(%u) waiting on %u workers",
- which, manager->busy_workers());
- }
- manager->monitor()->wait_without_safepoint_check(0);
- }
-}
-
-void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitForBarrierGCTask::do_it() waiting for idle",
- p2i(this));
- }
- {
- // First, wait for the barrier to arrive.
- MutexLocker ml(manager->lock(), Mutex::_no_safepoint_check_flag);
- do_it_internal(manager, which);
- // Release manager->lock().
- }
- // Then notify the waiter.
- _wait_helper.notify();
-}
-
-WaitHelper::WaitHelper() : _monitor(MonitorSupply::reserve()), _should_wait(true) {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitHelper::WaitHelper()"
- " monitor: " INTPTR_FORMAT,
- p2i(this), p2i(monitor()));
- }
-}
-
-void WaitHelper::release_monitor() {
- assert(_monitor != NULL, "");
- MonitorSupply::release(_monitor);
- _monitor = NULL;
-}
-
-WaitHelper::~WaitHelper() {
- release_monitor();
-}
-
-void WaitHelper::wait_for(bool reset) {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitForBarrierGCTask::wait_for()"
- " should_wait: %s",
- p2i(this), should_wait() ? "true" : "false");
- }
- {
- // Grab the lock and check again.
- MonitorLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- while (should_wait()) {
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitForBarrierGCTask::wait_for()"
- " [" INTPTR_FORMAT "] (%s)->wait()",
- p2i(this), p2i(monitor()), monitor()->name());
- }
- ml.wait(0);
- }
- // Reset the flag in case someone reuses this task.
- if (reset) {
- set_should_wait(true);
- }
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitForBarrierGCTask::wait_for() returns"
- " should_wait: %s",
- p2i(this), should_wait() ? "true" : "false");
- }
- // Release monitor().
- }
-}
-
-void WaitHelper::notify() {
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- set_should_wait(false);
- // Waiter doesn't miss the notify in the wait_for method
- // since it checks the flag after grabbing the monitor.
- if (TraceGCTaskManager) {
- tty->print_cr("[" INTPTR_FORMAT "]"
- " WaitForBarrierGCTask::do_it()"
- " [" INTPTR_FORMAT "] (%s)->notify_all()",
- p2i(this), p2i(monitor()), monitor()->name());
- }
- monitor()->notify_all();
-}
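
WaitHelper::notify() sets the flag while holding the monitor, and wait_for() only blocks after checking the flag under the same monitor, so a notify that happens before the waiter arrives cannot be lost. The same handshake, sketched with the C++ standard library (names are illustrative, not from this patch):

#include <condition_variable>
#include <mutex>

class WaitFlag {
  std::mutex lock;
  std::condition_variable cv;
  bool should_wait = true;
public:
  void wait_for() {
    std::unique_lock<std::mutex> ml(lock);
    while (should_wait) cv.wait(ml);   // flag is only read under the lock
    should_wait = true;                // reset for reuse, like wait_for(true /* reset */)
  }
  void notify() {
    { std::lock_guard<std::mutex> ml(lock); should_wait = false; }
    cv.notify_all();
  }
};
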
-
-Mutex* MonitorSupply::_lock = NULL;
-GrowableArray<Monitor*>* MonitorSupply::_freelist = NULL;
-
-Monitor* MonitorSupply::reserve() {
- Monitor* result = NULL;
- // Lazy initialization: possible race.
- if (lock() == NULL) {
- _lock = new Mutex(Mutex::barrier, // rank
- "MonitorSupply mutex", // name
- Mutex::_allow_vm_block_flag); // allow_vm_block
- }
- {
- MutexLocker ml(lock());
- // Lazy initialization.
- if (freelist() == NULL) {
- _freelist =
- new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
- true);
- }
- if (! freelist()->is_empty()) {
- result = freelist()->pop();
- } else {
- result = new Monitor(Mutex::barrier, // rank
- "MonitorSupply monitor", // name
- Mutex::_allow_vm_block_flag, // allow_vm_block
- Monitor::_safepoint_check_never);
- }
- guarantee(result != NULL, "shouldn't return NULL");
- assert(!result->is_locked(), "shouldn't be locked");
- // release lock().
- }
- return result;
-}
-
-void MonitorSupply::release(Monitor* instance) {
- assert(instance != NULL, "shouldn't release NULL");
- assert(!instance->is_locked(), "shouldn't be locked");
- {
- MutexLocker ml(lock());
- freelist()->push(instance);
- // release lock().
- }
-}
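
The "possible race" noted in reserve() is in the lazy creation of _lock: two threads that both observe lock() == NULL would each allocate a Mutex and one allocation would be overwritten. Presumably this is tolerated because the first reserve() call happens before multiple GC threads are running (an assumption, not something this patch states). For a stand-alone illustration only, a C++11 function-local static gives race-free one-time initialization without that caveat:

#include <mutex>

// Illustrative alternative; not how the HotSpot code above is written.
static std::mutex& supply_lock() {
  static std::mutex lock;   // guaranteed to be initialized exactly once
  return lock;
}
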
--- a/src/hotspot/share/gc/parallel/gcTaskManager.hpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,668 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_GCTASKMANAGER_HPP
-#define SHARE_GC_PARALLEL_GCTASKMANAGER_HPP
-
-#include "runtime/mutex.hpp"
-#include "utilities/growableArray.hpp"
-
-//
-// The GCTaskManager is a queue of GCTasks, and accessors
-// to allow the queue to be accessed from many threads.
-//
-
-// Forward declarations of types defined in this file.
-class GCTask;
-class GCTaskQueue;
-class SynchronizedGCTaskQueue;
-class GCTaskManager;
-// Some useful subclasses of GCTask. You can also make up your own.
-class NoopGCTask;
-class WaitForBarrierGCTask;
-class IdleGCTask;
-// A free list of Monitor*'s.
-class MonitorSupply;
-
-// Forward declarations of classes referenced in this file via pointer.
-class GCTaskThread;
-class Mutex;
-class Monitor;
-class ThreadClosure;
-
-// The abstract base GCTask.
-class GCTask : public ResourceObj {
-public:
- // Known kinds of GCTasks, for predicates.
- class Kind : AllStatic {
- public:
- enum kind {
- unknown_task,
- ordinary_task,
- wait_for_barrier_task,
- noop_task,
- idle_task
- };
- static const char* to_string(kind value);
- };
-private:
- // Instance state.
- Kind::kind _kind; // For runtime type checking.
- uint _affinity; // Which worker should run task.
- GCTask* _newer; // Tasks are on doubly-linked ...
- GCTask* _older; // ... lists.
- uint _gc_id; // GC Id to use for the thread that executes this task
-public:
- virtual char* name() { return (char *)"task"; }
-
- uint gc_id() { return _gc_id; }
-
- // Abstract do_it method
- virtual void do_it(GCTaskManager* manager, uint which) = 0;
- // Accessors
- Kind::kind kind() const {
- return _kind;
- }
- uint affinity() const {
- return _affinity;
- }
- GCTask* newer() const {
- return _newer;
- }
- void set_newer(GCTask* n) {
- _newer = n;
- }
- GCTask* older() const {
- return _older;
- }
- void set_older(GCTask* p) {
- _older = p;
- }
- // Predicates.
- bool is_ordinary_task() const {
- return kind()==Kind::ordinary_task;
- }
- bool is_barrier_task() const {
- return kind()==Kind::wait_for_barrier_task;
- }
- bool is_noop_task() const {
- return kind()==Kind::noop_task;
- }
- bool is_idle_task() const {
- return kind()==Kind::idle_task;
- }
- void print(const char* message) const PRODUCT_RETURN;
-protected:
- // Constructors: Only create subclasses.
- // An ordinary GCTask.
- GCTask();
- // A GCTask of a particular kind, usually barrier or noop.
- GCTask(Kind::kind kind);
- GCTask(Kind::kind kind, uint gc_id);
-  // We want a virtual destructor because of the virtual methods,
- // but since ResourceObj's don't have their destructors
- // called, we don't have one at all. Instead we have
- // this method, which gets called by subclasses to clean up.
- virtual void destruct();
- // Methods.
- void initialize(Kind::kind kind, uint gc_id);
-};
-
-// A doubly-linked list of GCTasks.
-// The list is not synchronized, because sometimes we want to
-// build up a list and then make it available to other threads.
-// See also: SynchronizedGCTaskQueue.
-class GCTaskQueue : public ResourceObj {
-private:
- // Instance state.
- GCTask* _insert_end; // Tasks are enqueued at this end.
- GCTask* _remove_end; // Tasks are dequeued from this end.
- uint _length; // The current length of the queue.
- const bool _is_c_heap_obj; // Is this a CHeapObj?
-public:
- // Factory create and destroy methods.
- // Create as ResourceObj.
- static GCTaskQueue* create();
- // Create as CHeapObj.
- static GCTaskQueue* create_on_c_heap();
- // Destroyer.
- static void destroy(GCTaskQueue* that);
- // Accessors.
- // These just examine the state of the queue.
- bool is_empty() const {
- assert(((insert_end() == NULL && remove_end() == NULL) ||
- (insert_end() != NULL && remove_end() != NULL)),
- "insert_end and remove_end don't match");
- assert((insert_end() != NULL) || (_length == 0), "Not empty");
- return insert_end() == NULL;
- }
- uint length() const {
- return _length;
- }
- // Methods.
- // Enqueue one task.
- void enqueue(GCTask* task);
- // Enqueue a list of tasks. Empties the argument list.
- void enqueue(GCTaskQueue* list);
- // Dequeue one task.
- GCTask* dequeue();
- // Dequeue one task, preferring one with affinity.
- GCTask* dequeue(uint affinity);
-protected:
- // Constructor. Clients use factory, but there might be subclasses.
- GCTaskQueue(bool on_c_heap);
- // Destructor-like method.
- // Because ResourceMark doesn't call destructors.
- // This method cleans up like one.
- virtual void destruct();
- // Accessors.
- GCTask* insert_end() const {
- return _insert_end;
- }
- void set_insert_end(GCTask* value) {
- _insert_end = value;
- }
- GCTask* remove_end() const {
- return _remove_end;
- }
- void set_remove_end(GCTask* value) {
- _remove_end = value;
- }
- void increment_length() {
- _length += 1;
- }
- void decrement_length() {
- _length -= 1;
- }
- void set_length(uint value) {
- _length = value;
- }
- bool is_c_heap_obj() const {
- return _is_c_heap_obj;
- }
- // Methods.
- void initialize();
- GCTask* remove(); // Remove from remove end.
- GCTask* remove(GCTask* task); // Remove from the middle.
- void print(const char* message) const PRODUCT_RETURN;
- // Debug support
- void verify_length() const PRODUCT_RETURN;
-};
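
The intended client pattern for this queue is visible later in this patch (RefProcTaskExecutor::execute in pcTasks.cpp): build a GCTaskQueue, enqueue ordinary tasks first and stealing tasks last for load balancing, then hand the whole list to the manager and block until the trailing barrier task completes. A condensed sketch of that pattern; task, terminator, and active_gc_threads are assumed to be set up by the surrounding code:

GCTaskQueue* q = GCTaskQueue::create();
for (uint i = 0; i < active_gc_threads; i++) {
  q->enqueue(new RefProcTaskProxy(task, i));                   // ordinary work first
}
for (uint j = 0; j < active_gc_threads; j++) {
  q->enqueue(new StealMarkingTask(terminator.terminator()));   // stealing tasks last
}
PSParallelCompact::gc_task_manager()->execute_and_wait(q);     // blocks on the barrier task
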
-
-// A GCTaskQueue that can be synchronized.
-// This "has-a" GCTaskQueue and a mutex to do the exclusion.
-class SynchronizedGCTaskQueue : public CHeapObj<mtGC> {
-private:
- // Instance state.
- GCTaskQueue* _unsynchronized_queue; // Has-a unsynchronized queue.
- Monitor * _lock; // Lock to control access.
-public:
- // Factory create and destroy methods.
- static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Monitor * lock) {
- return new SynchronizedGCTaskQueue(queue, lock);
- }
- static void destroy(SynchronizedGCTaskQueue* that) {
- if (that != NULL) {
- delete that;
- }
- }
- // Accessors
- GCTaskQueue* unsynchronized_queue() const {
- return _unsynchronized_queue;
- }
- Monitor * lock() const {
- return _lock;
- }
- // GCTaskQueue wrapper methods.
- // These check that you hold the lock
- // and then call the method on the queue.
- bool is_empty() const {
- guarantee(own_lock(), "don't own the lock");
- return unsynchronized_queue()->is_empty();
- }
- void enqueue(GCTask* task) {
- guarantee(own_lock(), "don't own the lock");
- unsynchronized_queue()->enqueue(task);
- }
- void enqueue(GCTaskQueue* list) {
- guarantee(own_lock(), "don't own the lock");
- unsynchronized_queue()->enqueue(list);
- }
- GCTask* dequeue() {
- guarantee(own_lock(), "don't own the lock");
- return unsynchronized_queue()->dequeue();
- }
- GCTask* dequeue(uint affinity) {
- guarantee(own_lock(), "don't own the lock");
- return unsynchronized_queue()->dequeue(affinity);
- }
- uint length() const {
- guarantee(own_lock(), "don't own the lock");
- return unsynchronized_queue()->length();
- }
- // For guarantees.
- bool own_lock() const {
- return lock()->owned_by_self();
- }
-protected:
- // Constructor. Clients use factory, but there might be subclasses.
- SynchronizedGCTaskQueue(GCTaskQueue* queue, Monitor * lock);
- // Destructor. Not virtual because no virtuals.
- ~SynchronizedGCTaskQueue();
-};
-
-class WaitHelper {
- private:
- Monitor* _monitor;
- volatile bool _should_wait;
- public:
- WaitHelper();
- ~WaitHelper();
- void wait_for(bool reset);
- void notify();
- void set_should_wait(bool value) {
- _should_wait = value;
- }
-
- Monitor* monitor() const {
- return _monitor;
- }
- bool should_wait() const {
- return _should_wait;
- }
- void release_monitor();
-};
-
-// Dynamic number of GC threads
-//
-// GC threads wait in get_task() for work (i.e., a task) to perform.
-// When the number of GC threads was static, the number of tasks
-// created to do a job was equal to or greater than the maximum
-// number of GC threads (ParallelGCThreads). The job might be divided
-// into a number of tasks greater than the number of GC threads for
-// load balancing (i.e., over partitioning). The last task to be
-// executed by a GC thread in a job is a work stealing task. A
-// GC thread that gets a work stealing task continues to execute
-// that task until the job is done. In the static number of GC threads
-// case, tasks are added to a queue (FIFO). The work stealing tasks are
-// the last to be added. Once the tasks are added, the GC threads grab
-// a task and go. A single thread can do all the non-work stealing tasks
-// and then execute a work stealing task and wait for all the other GC threads
-// to execute their work stealing task.
-// In the dynamic number of GC threads implementation, idle-tasks are
-// created to occupy the non-participating or "inactive" threads. An
-// idle-task makes the GC thread wait on a barrier that is part of the
-// GCTaskManager. The GC threads that have been "idled" in an IdleGCTask
-// are released once all the active GC threads have finished their work
-// stealing tasks. The GCTaskManager does not wait for all the "idled"
-// GC threads to resume execution. When those GC threads do resume
-// execution in the course of the thread scheduling, they call get_task()
-// as all the other GC threads do. Because all the "idled" threads are
-// not required to execute in order to finish a job, it is possible for
-// a GC thread to still be "idled" when the next job is started. Such
-// a thread stays "idled" for the next job. This can result in a new
-// job not having all the expected active workers. For example if one
-// job requests 4 active workers out of a total of 10 workers so the
-// remaining 6 are "idled", if the next job requests 6 active workers
-// but all 6 of the "idled" workers are still idle, then the next job
-// will only get 4 active workers.
-// The implementation for the parallel old compaction phase has an
-// added complication. In the static case parold partitions the chunks
-// ready to be filled into stacks, one for each GC thread. A GC thread
-// executing a draining task (drains the stack of ready chunks)
-// claims a stack according to its id (the unique ordinal value assigned
-// to each GC thread). In the dynamic case not all GC threads will
-// actively participate so stacks with ready to fill chunks can only be
-// given to the active threads. An initial implementation chose stacks
-// numbered 1-n to get the ready chunks and required that GC threads
-// 1-n be the active workers. This was undesirable because it required
-// certain threads to participate. In the final implementation a
-// list of stacks equal in number to the active workers are filled
-// with ready chunks. GC threads that participate get a stack from
-// the task (DrainStacksCompactionTask), empty the stack, and then add it to a
-// recycling list at the end of the task. If the same GC thread gets
-// a second task, it gets a second stack to drain and returns it. The
-// stacks are added to a recycling list so that later stealing tasks
-// for these tasks can get a stack from the recycling list. Stealing tasks
-// use the stacks in their work in a way similar to the draining tasks.
-// A thread is not guaranteed to get anything but a stealing task and
-// a thread that only gets a stealing task has to get a stack. A failed
-// implementation tried to have the GC threads keep the stack they used
-// during a draining task for later use in the stealing task but that didn't
-// work because as noted a thread is not guaranteed to get a draining task.
-//
-// For PSScavenge and ParCompactionManager the GC threads are
-// held in the GCTaskThread** _thread array in GCTaskManager.
-
-
-class GCTaskManager : public CHeapObj<mtGC> {
- friend class ParCompactionManager;
- friend class PSParallelCompact;
- friend class PSScavenge;
- friend class PSRefProcTaskExecutor;
- friend class RefProcTaskExecutor;
- friend class GCTaskThread;
- friend class IdleGCTask;
-private:
- // Instance state.
- const uint _workers; // Number of workers.
- Monitor* _monitor; // Notification of changes.
- SynchronizedGCTaskQueue* _queue; // Queue of tasks.
- GCTaskThread** _thread; // Array of worker threads.
- uint _created_workers; // Number of workers created.
- uint _active_workers; // Number of active workers.
- uint _busy_workers; // Number of busy workers.
- uint _blocking_worker; // The worker that's blocking.
- bool* _resource_flag; // Array of flag per threads.
- uint _delivered_tasks; // Count of delivered tasks.
- uint _completed_tasks; // Count of completed tasks.
- uint _barriers; // Count of barrier tasks.
- uint _emptied_queue; // Times we emptied the queue.
- NoopGCTask* _noop_task; // The NoopGCTask instance.
- WaitHelper _wait_helper; // Used by inactive worker
- volatile uint _idle_workers; // Number of idled workers
- uint* _processor_assignment; // Worker to cpu mappings. May
- // be used lazily
-public:
- // Factory create and destroy methods.
- static GCTaskManager* create(uint workers) {
- return new GCTaskManager(workers);
- }
- static void destroy(GCTaskManager* that) {
- if (that != NULL) {
- delete that;
- }
- }
- // Accessors.
- uint busy_workers() const {
- return _busy_workers;
- }
- volatile uint idle_workers() const {
- return _idle_workers;
- }
- // Pun between Monitor* and Mutex*
- Monitor* monitor() const {
- return _monitor;
- }
- Monitor * lock() const {
- return _monitor;
- }
- WaitHelper* wait_helper() {
- return &_wait_helper;
- }
- // Methods.
- // Add the argument task to be run.
- void add_task(GCTask* task);
- // Add a list of tasks. Removes task from the argument list.
- void add_list(GCTaskQueue* list);
- // Claim a task for argument worker.
- GCTask* get_task(uint which);
- // Note the completion of a task by the argument worker.
- void note_completion(uint which);
- // Is the queue blocked from handing out new tasks?
- bool is_blocked() const {
- return (blocking_worker() != sentinel_worker());
- }
- // Request that all workers release their resources.
- void release_all_resources();
- // Ask if a particular worker should release its resources.
- bool should_release_resources(uint which); // Predicate.
- // Note the release of resources by the argument worker.
- void note_release(uint which);
- // Create IdleGCTasks for inactive workers and start workers
- void task_idle_workers();
- // Release the workers in IdleGCTasks
- void release_idle_workers();
- // Constants.
- // A sentinel worker identifier.
- static uint sentinel_worker() {
- return (uint) -1; // Why isn't there a max_uint?
- }
-
- // Execute the task queue and wait for the completion.
- void execute_and_wait(GCTaskQueue* list);
-
- void print_task_time_stamps();
- void print_threads_on(outputStream* st);
- void threads_do(ThreadClosure* tc);
-
-protected:
- // Constructors. Clients use factory, but there might be subclasses.
- // Create a GCTaskManager with the appropriate number of workers.
- GCTaskManager(uint workers);
- // Make virtual if necessary.
- ~GCTaskManager();
- // Accessors.
- uint workers() const {
- return _workers;
- }
- uint update_active_workers(uint v) {
- assert(v <= _workers, "Trying to set more workers active than there are");
- _active_workers = MIN2(v, _workers);
- assert(v != 0, "Trying to set active workers to 0");
- _active_workers = MAX2(1U, _active_workers);
- return _active_workers;
- }
- // Sets the number of threads that will be used in a collection
- void set_active_gang();
-
- SynchronizedGCTaskQueue* queue() const {
- return _queue;
- }
- NoopGCTask* noop_task() const {
- return _noop_task;
- }
- // Bounds-checking per-thread data accessors.
- GCTaskThread* thread(uint which);
- void set_thread(uint which, GCTaskThread* value);
- bool resource_flag(uint which);
- void set_resource_flag(uint which, bool value);
- // Modifier methods with some semantics.
- // Is any worker blocking handing out new tasks?
- uint blocking_worker() const {
- return _blocking_worker;
- }
- void set_blocking_worker(uint value) {
- _blocking_worker = value;
- }
- void set_unblocked() {
- set_blocking_worker(sentinel_worker());
- }
- // Count of busy workers.
- void reset_busy_workers() {
- _busy_workers = 0;
- }
- uint increment_busy_workers();
- uint decrement_busy_workers();
- // Count of tasks delivered to workers.
- uint delivered_tasks() const {
- return _delivered_tasks;
- }
- void increment_delivered_tasks() {
- _delivered_tasks += 1;
- }
- void reset_delivered_tasks() {
- _delivered_tasks = 0;
- }
- // Count of tasks completed by workers.
- uint completed_tasks() const {
- return _completed_tasks;
- }
- void increment_completed_tasks() {
- _completed_tasks += 1;
- }
- void reset_completed_tasks() {
- _completed_tasks = 0;
- }
- // Count of barrier tasks completed.
- uint barriers() const {
- return _barriers;
- }
- void increment_barriers() {
- _barriers += 1;
- }
- void reset_barriers() {
- _barriers = 0;
- }
- // Count of how many times the queue has emptied.
- uint emptied_queue() const {
- return _emptied_queue;
- }
- void increment_emptied_queue() {
- _emptied_queue += 1;
- }
- void reset_emptied_queue() {
- _emptied_queue = 0;
- }
- void increment_idle_workers() {
- _idle_workers++;
- }
- void decrement_idle_workers() {
- _idle_workers--;
- }
- // Other methods.
- void initialize();
-
- public:
- // Return true if all workers are currently active.
- bool all_workers_active() { return workers() == active_workers(); }
- uint active_workers() const {
- return _active_workers;
- }
- uint created_workers() const {
- return _created_workers;
- }
- // Create a GC worker and install into GCTaskManager
- GCTaskThread* install_worker(uint worker_id);
- // Add GC workers as needed.
- void add_workers(bool initializing);
- // Base name (without worker id #) of threads.
- const char* group_name();
-};
-
-//
-// Some exemplary GCTasks.
-//
-
-// A noop task that does nothing,
-// except take us around the GCTaskThread loop.
-class NoopGCTask : public GCTask {
-public:
- // Factory create and destroy methods.
- static NoopGCTask* create_on_c_heap();
- static void destroy(NoopGCTask* that);
-
- virtual char* name() { return (char *)"noop task"; }
- // Methods from GCTask.
- void do_it(GCTaskManager* manager, uint which) {
- // Nothing to do.
- }
-protected:
- // Constructor.
- NoopGCTask();
- // Destructor-like method.
- void destruct();
-};
-
-// A WaitForBarrierGCTask is a GCTask
-// with a method you can call to wait until
-// the BarrierGCTask is done.
-class WaitForBarrierGCTask : public GCTask {
- friend class GCTaskManager;
- friend class IdleGCTask;
-private:
- // Instance state.
- WaitHelper _wait_helper;
- WaitForBarrierGCTask();
-public:
- virtual char* name() { return (char *) "waitfor-barrier-task"; }
-
- // Factory create and destroy methods.
- static WaitForBarrierGCTask* create();
- static void destroy(WaitForBarrierGCTask* that);
- // Methods.
- void do_it(GCTaskManager* manager, uint which);
-protected:
- // Destructor-like method.
- void destruct();
-
- // Methods.
- // Wait for this to be the only task running.
- void do_it_internal(GCTaskManager* manager, uint which);
-
- void wait_for(bool reset) {
- _wait_helper.wait_for(reset);
- }
-};
-
-// Task that is used to idle a GC worker when fewer than
-// the maximum workers are wanted.
-class IdleGCTask : public GCTask {
- const bool _is_c_heap_obj; // Was allocated on the heap.
- public:
- bool is_c_heap_obj() {
- return _is_c_heap_obj;
- }
- // Factory create and destroy methods.
- static IdleGCTask* create();
- static IdleGCTask* create_on_c_heap();
- static void destroy(IdleGCTask* that);
-
- virtual char* name() { return (char *)"idle task"; }
- // Methods from GCTask.
- virtual void do_it(GCTaskManager* manager, uint which);
-protected:
- // Constructor.
- IdleGCTask(bool on_c_heap) :
- GCTask(GCTask::Kind::idle_task),
- _is_c_heap_obj(on_c_heap) {
- // Nothing to do.
- }
- // Destructor-like method.
- void destruct();
-};
-
-class MonitorSupply : public AllStatic {
-private:
- // State.
- // Control multi-threaded access.
- static Mutex* _lock;
- // The list of available Monitor*'s.
- static GrowableArray<Monitor*>* _freelist;
-public:
- // Reserve a Monitor*.
- static Monitor* reserve();
- // Release a Monitor*.
- static void release(Monitor* instance);
-private:
- // Accessors.
- static Mutex* lock() {
- return _lock;
- }
- static GrowableArray<Monitor*>* freelist() {
- return _freelist;
- }
-};
-
-#endif // SHARE_GC_PARALLEL_GCTASKMANAGER_HPP
--- a/src/hotspot/share/gc/parallel/gcTaskThread.cpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/gcTaskThread.hpp"
-#include "gc/shared/gcId.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/handles.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/thread.hpp"
-
-GCTaskThread::GCTaskThread(GCTaskManager* manager,
- uint which,
- uint processor_id) :
- _manager(manager),
- _processor_id(processor_id),
- _time_stamps(NULL),
- _time_stamp_index(0)
-{
- set_id(which);
- set_name("%s#%d", manager->group_name(), which);
-}
-
-GCTaskThread::~GCTaskThread() {
- if (_time_stamps != NULL) {
- FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps);
- }
-}
-
-void GCTaskThread::add_task_timestamp(const char* name, jlong t_entry, jlong t_exit) {
- if (_time_stamp_index < GCTaskTimeStampEntries) {
- GCTaskTimeStamp* time_stamp = time_stamp_at(_time_stamp_index);
- time_stamp->set_name(name);
- time_stamp->set_entry_time(t_entry);
- time_stamp->set_exit_time(t_exit);
- } else {
- if (_time_stamp_index == GCTaskTimeStampEntries) {
- log_warning(gc, task, time)("GC-thread %u: Too many timestamps, ignoring future ones. "
- "Increase GCTaskTimeStampEntries to get more info.",
- id());
- }
- // Let _time_stamp_index keep counting to give the user an idea about how many
- // are needed.
- }
- _time_stamp_index++;
-}
-
-GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
- assert(index < GCTaskTimeStampEntries, "Precondition");
- if (_time_stamps == NULL) {
- // We allocate the _time_stamps array lazily since logging can be enabled dynamically
- GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
- if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
- // Someone already setup the time stamps
- FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
- }
- }
- return &(_time_stamps[index]);
-}
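
time_stamp_at() above allocates the array lazily and installs it with Atomic::replace_if_null, freeing the copy that loses the race instead of taking a lock. A stand-alone sketch of that pattern with std::atomic (names are illustrative, not from this patch):

#include <atomic>
#include <cstddef>

struct Stamp { long entry; long exit; };

std::atomic<Stamp*> g_stamps{nullptr};

Stamp* stamps(std::size_t n) {
  Stamp* cur = g_stamps.load(std::memory_order_acquire);
  if (cur != nullptr) return cur;
  Stamp* fresh = new Stamp[n]();
  if (g_stamps.compare_exchange_strong(cur, fresh)) {
    return fresh;          // this thread installed the array
  }
  delete[] fresh;          // another thread won the race; use its array
  return cur;              // cur was reloaded by the failed compare_exchange
}
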
-
-void GCTaskThread::print_task_time_stamps() {
- assert(log_is_enabled(Debug, gc, task, time), "Sanity");
-
- // Since _time_stamps is now lazily allocated we need to check that it
- // has in fact been allocated when calling this function.
- if (_time_stamps != NULL) {
- log_debug(gc, task, time)("GC-Thread %u entries: %d%s", id(),
- _time_stamp_index,
- _time_stamp_index >= GCTaskTimeStampEntries ? " (overflow)" : "");
- const uint max_index = MIN2(_time_stamp_index, GCTaskTimeStampEntries);
- for (uint i = 0; i < max_index; i++) {
- GCTaskTimeStamp* time_stamp = time_stamp_at(i);
- log_debug(gc, task, time)("\t[ %s " JLONG_FORMAT " " JLONG_FORMAT " ]",
- time_stamp->name(),
- time_stamp->entry_time(),
- time_stamp->exit_time());
- }
-
- // Reset after dumping the data
- _time_stamp_index = 0;
- }
-}
-
-// GC workers get tasks from the GCTaskManager and execute
-// them in this method. If there are no tasks to execute,
-// the GC workers wait in the GCTaskManager's get_task()
-// for tasks to be enqueued for execution.
-
-void GCTaskThread::run() {
- // Bind yourself to your processor.
- if (processor_id() != GCTaskManager::sentinel_worker()) {
- log_trace(gc, task, thread)("GCTaskThread::run: binding to processor %u", processor_id());
- if (!os::bind_to_processor(processor_id())) {
- DEBUG_ONLY(
- log_warning(gc)("Couldn't bind GCTaskThread %u to processor %u",
- which(), processor_id());
- )
- }
- }
- // Part of thread setup.
- // ??? Are these set up once here to make subsequent ones fast?
- HandleMark hm_outer;
- ResourceMark rm_outer;
-
- TimeStamp timer;
-
- for (;/* ever */;) {
- // These are so we can flush the resources allocated in the inner loop.
- HandleMark hm_inner;
- ResourceMark rm_inner;
- for (; /* break */; ) {
- // This will block until there is a task to be gotten.
- GCTask* task = manager()->get_task(which());
- GCIdMark gc_id_mark(task->gc_id());
- // Record if this is an idle task for later use.
- bool is_idle_task = task->is_idle_task();
- // In case the update is costly
- if (log_is_enabled(Debug, gc, task, time)) {
- timer.update();
- }
-
- jlong entry_time = timer.ticks();
- char* name = task->name();
-
- // If this is the barrier task, it can be destroyed
- // by the GC task manager once the do_it() executes.
- task->do_it(manager(), which());
-
- // Use the saved value of is_idle_task because references
- // using "task" are not reliable for the barrier task.
- if (!is_idle_task) {
- manager()->note_completion(which());
-
- if (log_is_enabled(Debug, gc, task, time)) {
- timer.update();
- add_task_timestamp(name, entry_time, timer.ticks());
- }
- } else {
- // idle tasks complete outside the normal accounting
- // so that a task can complete without waiting for idle tasks.
- // They have to be terminated separately.
- IdleGCTask::destroy((IdleGCTask*)task);
- }
-
- // Check if we should release our inner resources.
- if (manager()->should_release_resources(which())) {
- manager()->note_release(which());
- break;
- }
- }
- }
-}
--- a/src/hotspot/share/gc/parallel/gcTaskThread.hpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_GCTASKTHREAD_HPP
-#define SHARE_GC_PARALLEL_GCTASKTHREAD_HPP
-
-#include "runtime/thread.hpp"
-
-// Forward declarations of classes defined here.
-class GCTaskThread;
-class GCTaskTimeStamp;
-
-// Declarations of classes referenced in this file via pointer.
-class GCTaskManager;
-
-class GCTaskThread : public WorkerThread {
- friend class GCTaskManager;
-private:
- // Instance state.
- GCTaskManager* _manager; // Manager for worker.
- const uint _processor_id; // Which processor the worker is on.
-
- GCTaskTimeStamp* _time_stamps;
- uint _time_stamp_index;
-
- GCTaskTimeStamp* time_stamp_at(uint index);
- void add_task_timestamp(const char* name, jlong t_entry, jlong t_exit);
-
- // Factory create and destroy methods.
- static GCTaskThread* create(GCTaskManager* manager,
- uint which,
- uint processor_id) {
- return new GCTaskThread(manager, which, processor_id);
- }
- public:
-
- static void destroy(GCTaskThread* manager) {
- if (manager != NULL) {
- delete manager;
- }
- }
- // Methods from Thread.
- bool is_GC_task_thread() const {
- return true;
- }
- virtual void run();
-
- void print_task_time_stamps();
-
-protected:
- // Constructor. Clients use factory, but there could be subclasses.
- GCTaskThread(GCTaskManager* manager, uint which, uint processor_id);
- // Destructor: virtual destructor because of virtual methods.
- virtual ~GCTaskThread();
- // Accessors.
- GCTaskManager* manager() const {
- return _manager;
- }
- uint which() const {
- return id();
- }
- uint processor_id() const {
- return _processor_id;
- }
-};
-
-class GCTaskTimeStamp : public CHeapObj<mtGC>
-{
- private:
- jlong _entry_time;
- jlong _exit_time;
- const char* _name;
-
- public:
- jlong entry_time() { return _entry_time; }
- jlong exit_time() { return _exit_time; }
- const char* name() const { return _name; }
-
- void set_entry_time(jlong time) { _entry_time = time; }
- void set_exit_time(jlong time) { _exit_time = time; }
- void set_name(const char* name) { _name = name; }
-};
-
-#endif // SHARE_GC_PARALLEL_GCTASKTHREAD_HPP
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/parallelArguments.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
@@ -59,7 +58,6 @@
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
-GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
@@ -116,13 +114,13 @@
_gc_policy_counters =
new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
- // Set up the GCTaskManager
- _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
-
if (UseParallelOldGC && !PSParallelCompact::initialize()) {
return JNI_ENOMEM;
}
+ // Set up WorkGang
+ _workers.initialize_workers();
+
return JNI_OK;
}
@@ -602,11 +600,11 @@
}
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
- PSScavenge::gc_task_manager()->threads_do(tc);
+ ParallelScavengeHeap::heap()->workers().threads_do(tc);
}
void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
- PSScavenge::gc_task_manager()->print_threads_on(st);
+ ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}
void ParallelScavengeHeap::print_tracing_info() const {
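
The replacement side of this change: ParallelScavengeHeap now owns a WorkGang member (constructed with "GC Thread" and ParallelGCThreads, initialized via initialize_workers()), and thread iteration goes through workers().threads_do()/print_worker_threads_on() instead of the GCTaskManager. Under the gc/shared/workgroup.hpp model the rest of this patch migrates to, work is expressed as an AbstractGangTask run on that gang; a hedged sketch, assuming the AbstractGangTask/run_task API (the task class and helper below are illustrative, not from this patch):

class PSExampleTask : public AbstractGangTask {
public:
  PSExampleTask() : AbstractGangTask("PS Example Task") { }
  virtual void work(uint worker_id) {
    // per-worker slice of the job, indexed by worker_id
  }
};

void run_example() {
  PSExampleTask task;
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}
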
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -37,6 +37,7 @@
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
@@ -44,7 +45,6 @@
class AdjoiningGenerations;
class GCHeapSummary;
-class GCTaskManager;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
@@ -68,9 +68,6 @@
AdjoiningGenerations* _gens;
unsigned int _death_march_count;
- // The task manager
- static GCTaskManager* _gc_task_manager;
-
GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;
@@ -78,6 +75,8 @@
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;
+ WorkGang _workers;
+
virtual void initialize_serviceability();
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
@@ -99,7 +98,11 @@
_old_manager(NULL),
_eden_pool(NULL),
_survivor_pool(NULL),
- _old_pool(NULL) { }
+ _old_pool(NULL),
+ _workers("GC Thread",
+ ParallelGCThreads,
+ true /* are_GC_task_threads */,
+ false /* are_ConcurrentGC_threads */) { }
// For use by VM operations
enum CollectionType {
@@ -129,8 +132,6 @@
static ParallelScavengeHeap* heap();
- static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
-
CardTableBarrierSet* barrier_set();
PSCardTable* card_table();
@@ -252,6 +253,10 @@
GCMemoryManager* old_gc_manager() const { return _old_manager; }
GCMemoryManager* young_gc_manager() const { return _young_manager; }
+
+ WorkGang& workers() {
+ return _workers;
+ }
};
// Class that can be used to print information about the
--- a/src/hotspot/share/gc/parallel/parallel_globals.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/parallel_globals.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,12 +62,6 @@
"limiter (a number between 0-100)") \
range(0, 100) \
\
- develop(bool, TraceGCTaskManager, false, \
- "Trace actions of the GC task manager") \
- \
- develop(bool, TraceGCTaskQueue, false, \
- "Trace actions of the GC task queues") \
- \
develop(bool, TraceParallelOldGCMarkingPhase, false, \
"Trace marking phase in ParallelOldGC") \
\
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/pcTasks.hpp"
-#include "gc/parallel/psCompactionManager.inline.hpp"
-#include "gc/parallel/psParallelCompact.inline.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "logging/log.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/jniHandles.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/management.hpp"
-#include "utilities/stack.inline.hpp"
-
-//
-// ThreadRootsMarkingTask
-//
-
-void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- ResourceMark rm;
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
-
- PCMarkAndPushClosure mark_and_push_closure(cm);
- MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
-
- _thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
-
- // Do the real work
- cm->follow_marking_stacks();
-}
-
-
-void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
- PCMarkAndPushClosure mark_and_push_closure(cm);
-
- switch (_root_type) {
- case universe:
- Universe::oops_do(&mark_and_push_closure);
- break;
-
- case jni_handles:
- JNIHandles::oops_do(&mark_and_push_closure);
- break;
-
- case threads:
- {
- ResourceMark rm;
- MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
- Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
- }
- break;
-
- case object_synchronizer:
- ObjectSynchronizer::oops_do(&mark_and_push_closure);
- break;
-
- case management:
- Management::oops_do(&mark_and_push_closure);
- break;
-
- case jvmti:
- JvmtiExport::oops_do(&mark_and_push_closure);
- break;
-
- case system_dictionary:
- SystemDictionary::oops_do(&mark_and_push_closure);
- break;
-
- case class_loader_data: {
- CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
- ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
- }
- break;
-
- case code_cache:
- // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
- //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
- AOTLoader::oops_do(&mark_and_push_closure);
- break;
-
- default:
- fatal("Unknown root type");
- }
-
- // Do the real work
- cm->follow_marking_stacks();
-}
-
-
-//
-// RefProcTaskProxy
-//
-
-void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
-{
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
- PCMarkAndPushClosure mark_and_push_closure(cm);
- ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
- _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
- mark_and_push_closure, follow_stack_closure);
-}
-
-//
-// RefProcTaskExecutor
-//
-
-void RefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
-{
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- uint active_gc_threads = heap->gc_task_manager()->active_workers();
- assert(active_gc_threads == ergo_workers,
- "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
- ergo_workers, active_gc_threads);
- OopTaskQueueSet* qset = ParCompactionManager::stack_array();
- TaskTerminator terminator(active_gc_threads, qset);
-
- GCTaskQueue* q = GCTaskQueue::create();
- for(uint i=0; i<active_gc_threads; i++) {
- q->enqueue(new RefProcTaskProxy(task, i));
- }
- if (task.marks_oops_alive() && (active_gc_threads>1)) {
- for (uint j=0; j<active_gc_threads; j++) {
- q->enqueue(new StealMarkingTask(terminator.terminator()));
- }
- }
- PSParallelCompact::gc_task_manager()->execute_and_wait(q);
-}
-
-//
-// StealMarkingTask
-//
-
-StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
- _terminator(t) {}
-
-void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
-
- oop obj = NULL;
- ObjArrayTask task;
- do {
- while (ParCompactionManager::steal_objarray(which, task)) {
- cm->follow_array((objArrayOop)task.obj(), task.index());
- cm->follow_marking_stacks();
- }
- while (ParCompactionManager::steal(which, obj)) {
- cm->follow_contents(obj);
- cm->follow_marking_stacks();
- }
- } while (!terminator()->offer_termination());
-}
-
-//
-// CompactionWithStealingTask
-//
-
-CompactionWithStealingTask::CompactionWithStealingTask(ParallelTaskTerminator* t):
- _terminator(t) {}
-
-void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
-
- // Drain the stacks that have been preloaded with regions
- // that are ready to fill.
-
- cm->drain_region_stacks();
-
- guarantee(cm->region_stack()->is_empty(), "Not empty");
-
- size_t region_index = 0;
-
- while(true) {
- if (ParCompactionManager::steal(which, region_index)) {
- PSParallelCompact::fill_and_update_region(cm, region_index);
- cm->drain_region_stacks();
- } else {
- if (terminator()->offer_termination()) {
- break;
- }
- // Go around again.
- }
- }
- return;
-}
-
-UpdateDensePrefixTask::UpdateDensePrefixTask(
- PSParallelCompact::SpaceId space_id,
- size_t region_index_start,
- size_t region_index_end) :
- _space_id(space_id), _region_index_start(region_index_start),
- _region_index_end(region_index_end) {}
-
-void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
-
- ParCompactionManager* cm =
- ParCompactionManager::gc_thread_compaction_manager(which);
-
- PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
- _space_id,
- _region_index_start,
- _region_index_end);
-}
--- a/src/hotspot/share/gc/parallel/pcTasks.hpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PCTASKS_HPP
-#define SHARE_GC_PARALLEL_PCTASKS_HPP
-
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/psParallelCompact.hpp"
-#include "gc/parallel/psTasks.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-
-
-// Tasks for parallel compaction of the old generation
-//
-// Tasks are created and enqueued on a task queue. The
-// tasks for parallel old collector for marking objects
-// are MarkFromRootsTask and ThreadRootsMarkingTask.
-//
-// MarkFromRootsTask's are created
-// with a root group (e.g., jni_handles) and when the do_it()
-// method of a MarkFromRootsTask is executed, it starts marking
-// from its root group.
-//
-// ThreadRootsMarkingTask's are created for each Java thread. When
-// the do_it() method of a ThreadRootsMarkingTask is executed, it
-// starts marking from the thread's roots.
-//
-// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask
-// does little more than create the task and put it on a queue. The
-// queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue.
-//
-// In addition to the MarkFromRootsTask and ThreadRootsMarkingTask
-// tasks there are StealMarkingTask tasks. The StealMarkingTasks
-// steal a reference from the marking stack of another
-// thread and transitively mark the object of the reference
-// and internal references. After successfully stealing a reference
-// and marking it, the StealMarkingTask drains its marking stack
-// before attempting another steal.
-//
-// ThreadRootsMarkingTask
-//
-// This task marks from the roots of a single thread. This task
-// enables marking of thread roots in parallel.
-//
-
-class ParallelTaskTerminator;
-
-class ThreadRootsMarkingTask : public GCTask {
- private:
- Thread* _thread;
-
- public:
- ThreadRootsMarkingTask(Thread* root) : _thread(root) {}
-
- char* name() { return (char *)"thread-roots-marking-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-
-//
-// MarkFromRootsTask
-//
-// This task marks from all the roots to all live
-// objects.
-//
-//
-
-class MarkFromRootsTask : public GCTask {
- public:
- enum RootType {
- universe = 1,
- jni_handles = 2,
- threads = 3,
- object_synchronizer = 4,
- management = 5,
- jvmti = 6,
- system_dictionary = 7,
- class_loader_data = 8,
- code_cache = 9
- };
- private:
- RootType _root_type;
- public:
- MarkFromRootsTask(RootType value) : _root_type(value) {}
-
- char* name() { return (char *)"mark-from-roots-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// RefProcTaskProxy
-//
-// This task is used as a proxy to parallel reference processing tasks .
-//
-
-class RefProcTaskProxy : public GCTask {
- typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
- ProcessTask & _rp_task;
- uint _work_id;
-public:
- RefProcTaskProxy(ProcessTask & rp_task, uint work_id)
- : _rp_task(rp_task),
- _work_id(work_id)
- { }
-
-private:
- virtual char* name() { return (char *)"Process referents by policy in parallel"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-
-//
-// RefProcTaskExecutor
-//
-// Task executor is an interface for the reference processor to run
-// tasks using GCTaskManager.
-//
-
-class RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
- virtual void execute(ProcessTask& task, uint ergo_workers);
-};
-
-
-//
-// StealMarkingTask
-//
-// This task is used to distribute work to idle threads.
-//
-
-class StealMarkingTask : public GCTask {
- private:
- ParallelTaskTerminator* const _terminator;
- private:
-
- public:
- char* name() { return (char *)"steal-marking-task"; }
-
- StealMarkingTask(ParallelTaskTerminator* t);
-
- ParallelTaskTerminator* terminator() { return _terminator; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// CompactionWithStealingTask
-//
-// This task is used to distribute work to idle threads.
-//
-
-class CompactionWithStealingTask : public GCTask {
- private:
- ParallelTaskTerminator* const _terminator;
- public:
- CompactionWithStealingTask(ParallelTaskTerminator* t);
-
- char* name() { return (char *)"steal-region-task"; }
- ParallelTaskTerminator* terminator() { return _terminator; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// UpdateDensePrefixTask
-//
-// This task is used to update the dense prefix
-// of a space.
-//
-
-class UpdateDensePrefixTask : public GCTask {
- private:
- PSParallelCompact::SpaceId _space_id;
- size_t _region_index_start;
- size_t _region_index_end;
-
- public:
- char* name() { return (char *)"update-dense_prefix-task"; }
-
- UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
- size_t region_index_start,
- size_t region_index_end);
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-#endif // SHARE_GC_PARALLEL_PCTASKS_HPP
--- a/src/hotspot/share/gc/parallel/psCardTable.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,11 @@
*/
#include "precompiled.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
-#include "gc/parallel/psTasks.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
@@ -128,6 +126,38 @@
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().
+// The generation (old gen) is divided into slices, which are further
+// subdivided into stripes, with one stripe per GC thread. The size of
+// a stripe is a constant, ssize.
+//
+// +===============+ slice 0
+// | stripe 0 |
+// +---------------+
+// | stripe 1 |
+// +---------------+
+// | stripe 2 |
+// +---------------+
+// | stripe 3 |
+// +===============+ slice 1
+// | stripe 0 |
+// +---------------+
+// | stripe 1 |
+// +---------------+
+// | stripe 2 |
+// +---------------+
+// | stripe 3 |
+// +===============+ slice 2
+// ...
+//
+// In this case there are 4 threads, so 4 stripes. A GC thread first works on
+// its stripe within slice 0 and then moves to its stripe in the next slice
+// until it has exceeded the top of the generation. The distance to stripe in
+// the next slice is calculated based on the number of stripes. The next
+// stripe is at ssize * number_of_stripes (= slice_stride).. So after
+// finishing stripe 0 in slice 0, the thread finds the stripe 0 in slice1 by
+// adding slice_stride to the start of stripe 0 in slice 0 to get to the start
+// of stride 0 in slice 1.
+
void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
MutableSpace* sp,
HeapWord* space_top,
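
For illustration only, a standalone C++ sketch (not part of this patch; the addresses, stripe size, and thread count below are assumed values) of the slice/stripe walk described in the comment above:

// Standalone sketch of the stripe walk described above; not HotSpot code.
// All constants are assumed values chosen for the example.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t space_bottom = 0x100000;                 // assumed old-gen bottom
  const uintptr_t space_top    = space_bottom + 64 * 1024; // assumed old-gen top
  const uintptr_t ssize        = 4 * 1024;                 // stripe size (constant)
  const unsigned  n_stripes    = 4;                        // one stripe per GC thread
  const uintptr_t slice_stride = ssize * n_stripes;        // distance between slices

  for (unsigned worker_id = 0; worker_id < n_stripes; ++worker_id) {
    printf("worker %u:", worker_id);
    // Start at this worker's stripe in slice 0, then jump slice_stride bytes
    // to reach the same stripe in each following slice, until past the top.
    for (uintptr_t stripe = space_bottom + worker_id * ssize;
         stripe < space_top;
         stripe += slice_stride) {
      printf(" [%#lx, %#lx)", (unsigned long)stripe, (unsigned long)(stripe + ssize));
    }
    printf("\n");
  }
  return 0;
}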
--- a/src/hotspot/share/gc/parallel/psCardTable.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psCardTable.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -31,7 +31,6 @@
class MutableSpace;
class ObjectStartArray;
class PSPromotionManager;
-class GCTaskQueue;
class PSCardTable: public CardTable {
private:
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
@@ -68,12 +67,12 @@
}
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
- assert(PSParallelCompact::gc_task_manager() != NULL,
+ assert(ParallelScavengeHeap::heap() != NULL,
"Needed for initialization");
_mark_bitmap = mbm;
- uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
+ uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
assert(_manager_array == NULL, "Attempt to initialize twice");
_manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
@@ -100,12 +99,12 @@
_manager_array[parallel_gc_threads] = new ParCompactionManager();
guarantee(_manager_array[parallel_gc_threads] != NULL,
"Could not create ParCompactionManager");
- assert(PSParallelCompact::gc_task_manager()->workers() != 0,
+ assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
"Not initialized?");
}
void ParCompactionManager::reset_all_bitmap_query_caches() {
- uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
+ uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
for (uint i=0; i<=parallel_gc_threads; i++) {
_manager_array[i]->reset_bitmap_query_cache();
}
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -43,7 +43,9 @@
friend class CompactionWithStealingTask;
friend class UpdateAndFillClosure;
friend class RefProcTaskExecutor;
- friend class IdleGCTask;
+ friend class PCRefProcTask;
+ friend class MarkFromRootsTask;
+ friend class UpdateDensePrefixAndCompactionTask;
public:
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -75,7 +75,7 @@
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
-// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
+// pointers into markWords for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
void PSMarkSweepDecorator::precompact() {
@@ -113,8 +113,8 @@
const intx interval = PrefetchScanIntervalInBytes;
while (q < t) {
- assert(oop(q)->mark_raw()->is_marked() || oop(q)->mark_raw()->is_unlocked() ||
- oop(q)->mark_raw()->has_bias_pattern(),
+ assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
+ oop(q)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (oop(q)->is_gc_marked()) {
/* prefetch beyond q */
@@ -259,7 +259,7 @@
if (allowed_deadspace_words >= deadlength) {
allowed_deadspace_words -= deadlength;
CollectedHeap::fill_with_object(q, deadlength);
- oop(q)->set_mark_raw(oop(q)->mark_raw()->set_marked());
+ oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
assert((int) deadlength == oop(q)->size(), "bad filler object size");
// Recall that we required "q == compaction_top".
return true;
@@ -350,7 +350,7 @@
q = t;
} else {
// $$$ Funky
- q = (HeapWord*) oop(_first_dead)->mark_raw()->decode_pointer();
+ q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
}
}
@@ -361,7 +361,7 @@
if (!oop(q)->is_gc_marked()) {
// mark is pointer to next marked oop
debug_only(prev_q = q);
- q = (HeapWord*) oop(q)->mark_raw()->decode_pointer();
+ q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
assert(q > prev_q, "we should be moving forward through memory");
} else {
// prefetch beyond q
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -30,16 +30,15 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
-#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
+#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp"
@@ -55,6 +54,8 @@
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/workerPolicy.hpp"
+#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
@@ -1016,9 +1017,6 @@
DEBUG_ONLY(mark_bitmap()->verify_clear();)
DEBUG_ONLY(summary_data().verify_clear();)
- // Have worker threads release resources the next time they run a task.
- gc_task_manager()->release_all_resources();
-
ParCompactionManager::reset_all_bitmap_query_caches();
}
@@ -1783,15 +1781,17 @@
// Get the compaction manager reserved for the VM thread.
ParCompactionManager* const vmthread_cm =
- ParCompactionManager::manager_array(gc_task_manager()->workers());
+ ParCompactionManager::manager_array(ParallelScavengeHeap::heap()->workers().total_workers());
{
ResourceMark rm;
HandleMark hm;
- // Set the number of GC threads to be used in this collection
- gc_task_manager()->set_active_gang();
- gc_task_manager()->task_idle_workers();
+ const uint active_workers =
+ WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
+ ParallelScavengeHeap::heap()->workers().active_workers(),
+ Threads::number_of_non_daemon_threads());
+ ParallelScavengeHeap::heap()->workers().update_active_workers(active_workers);
GCTraceCPUTime tcpu;
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
@@ -1928,7 +1928,6 @@
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
heap->update_counters();
- gc_task_manager()->release_idle_workers();
heap->post_full_gc_dump(&_gc_timer);
}
@@ -1967,7 +1966,6 @@
log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
marking_start.ticks(), compaction_start.ticks(),
collection_exit.ticks());
- gc_task_manager()->print_task_time_stamps();
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
@@ -1991,7 +1989,7 @@
assert(young_gen->virtual_space()->alignment() ==
old_gen->virtual_space()->alignment(), "alignments do not match");
- // We also return false when it's a heterogenous heap because old generation cannot absorb data from eden
+ // We also return false when it's a heterogeneous heap because old generation cannot absorb data from eden
// when it is allocated on different memory (example, nv-dimm) than young.
if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
ParallelArguments::is_heterogeneous_heap()) {
@@ -2072,20 +2070,174 @@
return true;
}
-GCTaskManager* const PSParallelCompact::gc_task_manager() {
- assert(ParallelScavengeHeap::gc_task_manager() != NULL,
- "shouldn't return NULL");
- return ParallelScavengeHeap::gc_task_manager();
-}
-
class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
- GCTaskQueue* _q;
+ uint _worker_id;
public:
- PCAddThreadRootsMarkingTaskClosure(GCTaskQueue* q) : _q(q) { }
- void do_thread(Thread* t) {
- _q->enqueue(new ThreadRootsMarkingTask(t));
+ PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
+ void do_thread(Thread* thread) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ ResourceMark rm;
+
+ ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
+
+ PCMarkAndPushClosure mark_and_push_closure(cm);
+ MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
+
+ thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
+
+ // Do the real work
+ cm->follow_marking_stacks();
+ }
+};
+
+static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_id) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ ParCompactionManager* cm =
+ ParCompactionManager::gc_thread_compaction_manager(worker_id);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
+
+ switch (root_type) {
+ case ParallelRootType::universe:
+ Universe::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::jni_handles:
+ JNIHandles::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::object_synchronizer:
+ ObjectSynchronizer::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::management:
+ Management::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::jvmti:
+ JvmtiExport::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::system_dictionary:
+ SystemDictionary::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::class_loader_data:
+ {
+ CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
+ ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
+ }
+ break;
+
+ case ParallelRootType::code_cache:
+ // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
+ //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
+ AOTLoader::oops_do(&mark_and_push_closure);
+ break;
+
+ case ParallelRootType::sentinel:
+ DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
+ fatal("Bad enumeration value: %u", root_type);
+ break;
+ }
+
+ // Do the real work
+ cm->follow_marking_stacks();
+}
+
+static void steal_marking_work(ParallelTaskTerminator& terminator, uint worker_id) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ ParCompactionManager* cm =
+ ParCompactionManager::gc_thread_compaction_manager(worker_id);
+
+ oop obj = NULL;
+ ObjArrayTask task;
+ do {
+ while (ParCompactionManager::steal_objarray(worker_id, task)) {
+ cm->follow_array((objArrayOop)task.obj(), task.index());
+ cm->follow_marking_stacks();
+ }
+ while (ParCompactionManager::steal(worker_id, obj)) {
+ cm->follow_contents(obj);
+ cm->follow_marking_stacks();
+ }
+ } while (!terminator.offer_termination());
+}
+
+class MarkFromRootsTask : public AbstractGangTask {
+ typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+ StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
+ SequentialSubTasksDone _subtasks;
+ TaskTerminator _terminator;
+ uint _active_workers;
+
+public:
+ MarkFromRootsTask(uint active_workers) :
+ AbstractGangTask("MarkFromRootsTask"),
+ _strong_roots_scope(active_workers),
+ _subtasks(),
+ _terminator(active_workers, ParCompactionManager::stack_array()),
+ _active_workers(active_workers) {
+ _subtasks.set_n_threads(active_workers);
+ _subtasks.set_n_tasks(ParallelRootType::sentinel);
+ }
+
+ virtual void work(uint worker_id) {
+ for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
+ mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
+ }
+ _subtasks.all_tasks_completed();
+
+ PCAddThreadRootsMarkingTaskClosure closure(worker_id);
+ Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
+
+ if (_active_workers > 1) {
+ steal_marking_work(*_terminator.terminator(), worker_id);
+ }
+ }
+};
+
+class PCRefProcTask : public AbstractGangTask {
+ typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+ ProcessTask& _task;
+ uint _ergo_workers;
+ TaskTerminator _terminator;
+
+public:
+ PCRefProcTask(ProcessTask& task, uint ergo_workers) :
+ AbstractGangTask("PCRefProcTask"),
+ _task(task),
+ _ergo_workers(ergo_workers),
+ _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
+ }
+
+ virtual void work(uint worker_id) {
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ ParCompactionManager* cm =
+ ParCompactionManager::gc_thread_compaction_manager(worker_id);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
+ ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
+ _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
+ mark_and_push_closure, follow_stack_closure);
+
+ steal_marking_work(*_terminator.terminator(), worker_id);
+ }
+};
+
+class RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+ void execute(ProcessTask& process_task, uint ergo_workers) {
+ assert(ParallelScavengeHeap::heap()->workers().active_workers() == ergo_workers,
+ "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
+ ergo_workers, ParallelScavengeHeap::heap()->workers().active_workers());
+
+ PCRefProcTask task(process_task, ergo_workers);
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
}
};
@@ -2096,10 +2248,7 @@
GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- uint parallel_gc_threads = heap->gc_task_manager()->workers();
- uint active_gc_threads = heap->gc_task_manager()->active_workers();
- TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
- TaskTerminator terminator(active_gc_threads, qset);
+ uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
PCMarkAndPushClosure mark_and_push_closure(cm);
ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
@@ -2110,29 +2259,8 @@
{
GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
- ParallelScavengeHeap::ParStrongRootsScope psrs;
-
- GCTaskQueue* q = GCTaskQueue::create();
-
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
- // We scan the thread roots in parallel
- PCAddThreadRootsMarkingTaskClosure cl(q);
- Threads::java_threads_and_vm_thread_do(&cl);
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
- q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
-
- if (active_gc_threads > 1) {
- for (uint j = 0; j < active_gc_threads; j++) {
- q->enqueue(new StealMarkingTask(terminator.terminator()));
- }
- }
-
- gc_task_manager()->execute_and_wait(q);
+ MarkFromRootsTask task(active_gc_threads);
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
}
// Process reference objects found during marking
@@ -2261,13 +2389,12 @@
}
};
-void PSParallelCompact::prepare_region_draining_tasks(GCTaskQueue* q,
- uint parallel_gc_threads)
+void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
{
GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
// Find the threads that are active
- unsigned int which = 0;
+ uint worker_id = 0;
// Find all regions that are available (can be filled immediately) and
// distribute them to the thread stacks. The iteration is done in reverse
@@ -2275,7 +2402,6 @@
const ParallelCompactData& sd = PSParallelCompact::summary_data();
- which = 0;
// id + 1 is used to test termination so unsigned can
// be used with an old_space_id == 0.
FillableRegionLogger region_logger;
@@ -2290,12 +2416,12 @@
for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
- ParCompactionManager* cm = ParCompactionManager::manager_array(which);
+ ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
cm->region_stack()->push(cur);
region_logger.handle(cur);
// Assign regions to tasks in round-robin fashion.
- if (++which == parallel_gc_threads) {
- which = 0;
+ if (++worker_id == parallel_gc_threads) {
+ worker_id = 0;
}
}
}
@@ -2303,10 +2429,40 @@
}
}
+class TaskQueue : StackObj {
+ volatile uint _counter;
+ uint _size;
+ uint _insert_index;
+ PSParallelCompact::UpdateDensePrefixTask* _backing_array;
+public:
+ explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(NULL) {
+ _backing_array = NEW_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _size, mtGC);
+ }
+ ~TaskQueue() {
+ assert(_counter >= _insert_index, "not all queue elements were claimed");
+    FREE_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _backing_array);
+ }
+
+ void push(const PSParallelCompact::UpdateDensePrefixTask& value) {
+ assert(_insert_index < _size, "too small backing array");
+ _backing_array[_insert_index++] = value;
+ }
+
+ bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
+ uint claimed = Atomic::add(1u, &_counter) - 1; // -1 is so that we start with zero
+ if (claimed < _insert_index) {
+ reference = _backing_array[claimed];
+ return true;
+ } else {
+ return false;
+ }
+ }
+};
+
#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
-void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
- uint parallel_gc_threads) {
+void PSParallelCompact::enqueue_dense_prefix_tasks(TaskQueue& task_queue,
+ uint parallel_gc_threads) {
GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
ParallelCompactData& sd = PSParallelCompact::summary_data();
@@ -2369,35 +2525,22 @@
// region_index_end is not processed
size_t region_index_end = MIN2(region_index_start + regions_per_thread,
region_index_end_dense_prefix);
- q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
- region_index_start,
- region_index_end));
+ task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
+ region_index_start,
+ region_index_end));
region_index_start = region_index_end;
}
}
// This gets any part of the dense prefix that did not
// fit evenly.
if (region_index_start < region_index_end_dense_prefix) {
- q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
- region_index_start,
- region_index_end_dense_prefix));
+ task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
+ region_index_start,
+ region_index_end_dense_prefix));
}
}
}
-void PSParallelCompact::enqueue_region_stealing_tasks(
- GCTaskQueue* q,
- ParallelTaskTerminator* terminator_ptr,
- uint parallel_gc_threads) {
- GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
-
- // Once a thread has drained it's stack, it should try to steal regions from
- // other threads.
- for (uint j = 0; j < parallel_gc_threads; j++) {
- q->enqueue(new CompactionWithStealingTask(terminator_ptr));
- }
-}
-
#ifdef ASSERT
// Write a histogram of the number of times the block table was filled for a
// region.
@@ -2440,26 +2583,87 @@
}
#endif // #ifdef ASSERT
+static void compaction_with_stealing_work(ParallelTaskTerminator* terminator, uint worker_id) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ ParCompactionManager* cm =
+ ParCompactionManager::gc_thread_compaction_manager(worker_id);
+
+ // Drain the stacks that have been preloaded with regions
+ // that are ready to fill.
+
+ cm->drain_region_stacks();
+
+ guarantee(cm->region_stack()->is_empty(), "Not empty");
+
+ size_t region_index = 0;
+
+ while (true) {
+ if (ParCompactionManager::steal(worker_id, region_index)) {
+ PSParallelCompact::fill_and_update_region(cm, region_index);
+ cm->drain_region_stacks();
+ } else {
+ if (terminator->offer_termination()) {
+ break;
+ }
+ // Go around again.
+ }
+ }
+ return;
+}
+
+class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
+ typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+ TaskQueue& _tq;
+ TaskTerminator _terminator;
+ uint _active_workers;
+
+public:
+ UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
+ AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
+ _tq(tq),
+ _terminator(active_workers, ParCompactionManager::region_array()),
+ _active_workers(active_workers) {
+ }
+ virtual void work(uint worker_id) {
+ ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
+
+ for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
+ PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
+ task._space_id,
+ task._region_index_start,
+ task._region_index_end);
+ }
+
+    // Once a thread has drained its stack, it should try to steal regions from
+ // other threads.
+ compaction_with_stealing_work(_terminator.terminator(), worker_id);
+ }
+};
+
void PSParallelCompact::compact() {
GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
old_gen->start_array()->reset();
- uint parallel_gc_threads = heap->gc_task_manager()->workers();
- uint active_gc_threads = heap->gc_task_manager()->active_workers();
- TaskQueueSetSuper* qset = ParCompactionManager::region_array();
- TaskTerminator terminator(active_gc_threads, qset);
-
- GCTaskQueue* q = GCTaskQueue::create();
- prepare_region_draining_tasks(q, active_gc_threads);
- enqueue_dense_prefix_tasks(q, active_gc_threads);
- enqueue_region_stealing_tasks(q, terminator.terminator(), active_gc_threads);
+ uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
+
+ // for [0..last_space_id)
+ // for [0..active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)
+ // push
+ // push
+ //
+ // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
+ TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
+ prepare_region_draining_tasks(active_gc_threads);
+ enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
{
GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
- gc_task_manager()->execute_and_wait(q);
+ UpdateDensePrefixAndCompactionTask task(task_queue, active_gc_threads);
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
#ifdef ASSERT
// Verify that all regions have been processed before the deferred updates.
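
For concreteness, a hedged worked example of the push-count bound used to size the TaskQueue above; the space count follows the SpaceId enum in this patch, while the thread count is an assumed example value:

// Standalone sketch, not HotSpot code: the maximum number of
// UpdateDensePrefixTask entries pushed for an assumed configuration.
#include <cstdio>

int main() {
  const unsigned PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING = 4; // as defined above
  const unsigned last_space_id     = 4; // old, eden, from, to
  const unsigned active_gc_threads = 4; // assumed for this example

  // Per space: at most (threads * over-partitioning) evenly split chunks,
  // plus one leftover chunk for the part that did not divide evenly.
  const unsigned max_pushes =
      last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1);
  printf("TaskQueue capacity needed: %u\n", max_pushes); // prints 68 here
  return 0;
}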
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -40,8 +40,7 @@
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
-class GCTaskManager;
-class GCTaskQueue;
+class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
class ParallelOldTracer;
@@ -913,6 +912,8 @@
// region that can be put on the ready list. The regions are atomically added
// and removed from the ready list.
+class TaskQueue;
+
class PSParallelCompact : AllStatic {
public:
// Convenient access to type names.
@@ -925,6 +926,24 @@
from_space_id, to_space_id, last_space_id
} SpaceId;
+ struct UpdateDensePrefixTask : public CHeapObj<mtGC> {
+ SpaceId _space_id;
+ size_t _region_index_start;
+ size_t _region_index_end;
+
+ UpdateDensePrefixTask() :
+ _space_id(SpaceId(0)),
+ _region_index_start(0),
+ _region_index_end(0) {}
+
+ UpdateDensePrefixTask(SpaceId space_id,
+ size_t region_index_start,
+ size_t region_index_end) :
+ _space_id(space_id),
+ _region_index_start(region_index_start),
+ _region_index_end(region_index_end) {}
+ };
+
public:
// Inline closure decls
//
@@ -1050,19 +1069,12 @@
static void compact();
// Add available regions to the stack and draining tasks to the task queue.
- static void prepare_region_draining_tasks(GCTaskQueue* q,
- uint parallel_gc_threads);
+ static void prepare_region_draining_tasks(uint parallel_gc_threads);
// Add dense prefix update tasks to the task queue.
- static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
+ static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
uint parallel_gc_threads);
- // Add region stealing tasks to the task queue.
- static void enqueue_region_stealing_tasks(
- GCTaskQueue* q,
- ParallelTaskTerminator* terminator_ptr,
- uint parallel_gc_threads);
-
// If objects are left in eden after a collection, try to move the boundary
// and absorb them into the old gen. Returns true if eden was emptied.
static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
@@ -1101,9 +1113,6 @@
static unsigned int total_invocations() { return _total_invocations; }
static CollectorCounters* counters() { return _counters; }
- // Used to add tasks
- static GCTaskManager* const gc_task_manager();
-
// Marking support
static inline bool mark_obj(oop obj);
static inline bool is_marked(oop obj);
--- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -83,7 +83,7 @@
// so they can always fill with an array.
HeapWord* tlab_end = end() + filler_header_size;
typeArrayOop filler_oop = (typeArrayOop) top();
- filler_oop->set_mark_raw(markOopDesc::prototype());
+ filler_oop->set_mark_raw(markWord::prototype());
filler_oop->set_klass(Universe::intArrayKlassObj());
const size_t array_length =
pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
@@ -240,52 +239,8 @@
_preserved_marks = preserved_marks;
}
-class ParRestoreGCTask : public GCTask {
-private:
- const uint _id;
- PreservedMarksSet* const _preserved_marks_set;
- volatile size_t* const _total_size_addr;
-
-public:
- virtual char* name() {
- return (char*) "preserved mark restoration task";
- }
-
- virtual void do_it(GCTaskManager* manager, uint which){
- _preserved_marks_set->get(_id)->restore_and_increment(_total_size_addr);
- }
-
- ParRestoreGCTask(uint id,
- PreservedMarksSet* preserved_marks_set,
- volatile size_t* total_size_addr)
- : _id(id),
- _preserved_marks_set(preserved_marks_set),
- _total_size_addr(total_size_addr) { }
-};
-
-class PSRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
-private:
- GCTaskManager* _gc_task_manager;
-
-public:
- PSRestorePreservedMarksTaskExecutor(GCTaskManager* gc_task_manager)
- : _gc_task_manager(gc_task_manager) { }
-
- void restore(PreservedMarksSet* preserved_marks_set,
- volatile size_t* total_size_addr) {
- // GCTask / GCTaskQueue are ResourceObjs
- ResourceMark rm;
-
- GCTaskQueue* q = GCTaskQueue::create();
- for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
- q->enqueue(new ParRestoreGCTask(i, preserved_marks_set, total_size_addr));
- }
- _gc_task_manager->execute_and_wait(q);
- }
-};
-
void PSPromotionManager::restore_preserved_marks() {
- PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
+ SharedRestorePreservedMarksTaskExecutor task_executor(&ParallelScavengeHeap::heap()->workers());
_preserved_marks_set->restore(&task_executor);
}
@@ -390,7 +345,7 @@
}
}
-oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
+oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
// Attempt to CAS in the header.
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -51,7 +51,10 @@
class PSPromotionManager {
friend class PSScavenge;
+ friend class ScavengeRootsTask;
friend class PSRefProcTaskExecutor;
+ friend class PSRefProcTask;
+
private:
static PaddedEnd<PSPromotionManager>* _manager_array;
static OopStarTaskQueueSet* _stack_array_depth;
@@ -175,7 +178,7 @@
// Promotion methods
template<bool promote_immediately> oop copy_to_survivor_space(oop o);
- oop oop_promotion_failed(oop obj, markOop obj_mark);
+ oop oop_promotion_failed(oop obj, markWord obj_mark);
void reset();
void register_preserved_marks(PreservedMarks* preserved_marks);
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -157,16 +157,16 @@
// NOTE! We must be very careful with any methods that access the mark
// in o. There may be multiple threads racing on it, and it may be forwarded
// at any time. Do not use oop methods for accessing the mark!
- markOop test_mark = o->mark_raw();
+ markWord test_mark = o->mark_raw();
// The same test as "o->is_forwarded()"
- if (!test_mark->is_marked()) {
+ if (!test_mark.is_marked()) {
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();
// Find the objects age, MT safe.
- uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
- test_mark->displaced_mark_helper()->age() : test_mark->age();
+ uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+ test_mark.displaced_mark_helper().age() : test_mark.age();
if (!promote_immediately) {
// Try allocating obj in to-space (unless too old)
@@ -260,7 +260,7 @@
assert(new_obj == o->forwardee(), "Sanity");
// Increment age if obj still in new generation. Now that
- // we're dealing with a markOop that cannot change, it is
+ // we're dealing with a markWord that cannot change, it is
// okay to use the non mt safe oop methods.
if (!new_obj_is_tenured) {
new_obj->incr_age();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psRootType.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_PSROOTTYPE_HPP
+#define SHARE_GC_PARALLEL_PSROOTTYPE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+
+class ParallelRootType : public AllStatic {
+public:
+  // The order and compactness of this enum are important.
+  // The order reflects the order in which these roots are processed, and
+  // we do not want any holes in the enum because we enumerate its values by incrementing them.
+ enum Value {
+ universe,
+ jni_handles,
+ object_synchronizer,
+ management,
+ system_dictionary,
+ class_loader_data,
+ jvmti,
+ code_cache,
+    // "threads" are handled in parallel as a special case
+ sentinel
+ };
+};
+
+#endif /* SHARE_GC_PARALLEL_PSROOTTYPE_HPP */
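
As an aside, a small standalone C++ sketch (not HotSpot code; the atomic counter is a stand-in for SequentialSubTasksDone) of why the enum above must stay dense: workers claim root kinds by incrementing a shared counter and casting the claimed index back to the enum, so any hole would yield a skipped or invalid kind:

// Standalone sketch, not HotSpot code. Illustrates claiming dense enum values
// by atomically incrementing a counter, as the GC worker loops in this patch do.
#include <atomic>
#include <cstdio>

namespace ParallelRootType {
  enum Value { universe, jni_handles, object_synchronizer, management,
               system_dictionary, class_loader_data, jvmti, code_cache,
               sentinel };
}

static std::atomic<unsigned> g_next_task{0}; // stand-in for SequentialSubTasksDone

static bool try_claim_task(unsigned& task) {
  task = g_next_task.fetch_add(1);
  return task < ParallelRootType::sentinel;
}

int main() {
  // Each GC worker would run this loop; here one thread drains every kind.
  for (unsigned task = 0; try_claim_task(task); /* empty */) {
    ParallelRootType::Value root = static_cast<ParallelRootType::Value>(task);
    printf("processing root kind %u\n", (unsigned)root);
  }
  return 0;
}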
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -23,17 +23,19 @@
*/
#include "precompiled.hpp"
+#include "aot/aotLoader.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
+#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
+#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
-#include "gc/parallel/psTasks.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
@@ -45,8 +47,11 @@
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/workerPolicy.hpp"
+#include "gc/shared/workgroup.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "logging/log.hpp"
@@ -58,6 +63,7 @@
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
+#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"
@@ -75,6 +81,87 @@
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = NULL;
+static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
+ PSScavengeRootsClosure roots_closure(pm);
+ PSPromoteRootsClosure roots_to_old_closure(pm);
+
+ switch (root_type) {
+ case ParallelRootType::universe:
+ Universe::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::jni_handles:
+ JNIHandles::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::object_synchronizer:
+ ObjectSynchronizer::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::system_dictionary:
+ SystemDictionary::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::class_loader_data:
+ {
+ PSScavengeCLDClosure cld_closure(pm);
+ ClassLoaderDataGraph::cld_do(&cld_closure);
+ }
+ break;
+
+ case ParallelRootType::management:
+ Management::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::jvmti:
+ JvmtiExport::oops_do(&roots_closure);
+ break;
+
+ case ParallelRootType::code_cache:
+ {
+ MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
+ ScavengableNMethods::nmethods_do(&code_closure);
+ AOTLoader::oops_do(&roots_closure);
+ }
+ break;
+
+ case ParallelRootType::sentinel:
+ DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
+ fatal("Bad enumeration value: %u", root_type);
+ break;
+ }
+
+ // Do the real work
+ pm->drain_stacks(false);
+}
+
+static void steal_work(ParallelTaskTerminator& terminator, uint worker_id) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ PSPromotionManager* pm =
+ PSPromotionManager::gc_thread_promotion_manager(worker_id);
+ pm->drain_stacks(true);
+ guarantee(pm->stacks_empty(),
+ "stacks should be empty at this point");
+
+ while (true) {
+ StarTask p;
+ if (PSPromotionManager::steal_depth(worker_id, p)) {
+ TASKQUEUE_STATS_ONLY(pm->record_steal(p));
+ pm->process_popped_location_depth(p);
+ pm->drain_stacks_depth(true);
+ } else {
+ if (terminator.offer_termination()) {
+ break;
+ }
+ }
+ }
+ guarantee(pm->stacks_empty(), "stacks should be empty at this point");
+}
+
// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
@@ -125,57 +212,42 @@
}
};
-class PSRefProcTaskProxy: public GCTask {
- typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
- ProcessTask & _rp_task;
- uint _work_id;
-public:
- PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
- : _rp_task(rp_task),
- _work_id(work_id)
- { }
-
-private:
- virtual char* name() { return (char *)"Process referents by policy in parallel"; }
- virtual void do_it(GCTaskManager* manager, uint which);
+class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+ virtual void execute(ProcessTask& process_task, uint ergo_workers);
};
-void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
-{
- PSPromotionManager* promotion_manager =
- PSPromotionManager::gc_thread_promotion_manager(which);
- assert(promotion_manager != NULL, "sanity check");
- PSKeepAliveClosure keep_alive(promotion_manager);
- PSEvacuateFollowersClosure evac_followers(promotion_manager);
- PSIsAliveClosure is_alive;
- _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
-}
+class PSRefProcTask : public AbstractGangTask {
+ typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+ TaskTerminator _terminator;
+ ProcessTask& _task;
+ uint _active_workers;
+
+public:
+ PSRefProcTask(ProcessTask& task, uint active_workers)
+ : AbstractGangTask("PSRefProcTask"),
+ _terminator(active_workers, PSPromotionManager::stack_array_depth()),
+ _task(task),
+ _active_workers(active_workers) {
+ }
-class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
- virtual void execute(ProcessTask& task, uint ergo_workers);
+ virtual void work(uint worker_id) {
+ PSPromotionManager* promotion_manager =
+ PSPromotionManager::gc_thread_promotion_manager(worker_id);
+ assert(promotion_manager != NULL, "sanity check");
+ PSKeepAliveClosure keep_alive(promotion_manager);
+ PSEvacuateFollowersClosure evac_followers(promotion_manager);
+ PSIsAliveClosure is_alive;
+ _task.work(worker_id, is_alive, keep_alive, evac_followers);
+
+ if (_task.marks_oops_alive() && _active_workers > 1) {
+ steal_work(*_terminator.terminator(), worker_id);
+ }
+ }
};
-void PSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
-{
- GCTaskQueue* q = GCTaskQueue::create();
- GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
- uint active_workers = manager->active_workers();
-
- assert(active_workers == ergo_workers,
- "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
- ergo_workers, active_workers);
-
- for(uint i=0; i < active_workers; i++) {
- q->enqueue(new PSRefProcTaskProxy(task, i));
- }
- TaskTerminator terminator(active_workers,
- (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
- if (task.marks_oops_alive() && active_workers > 1) {
- for (uint j = 0; j < active_workers; j++) {
- q->enqueue(new StealTask(terminator.terminator()));
- }
- }
- manager->execute_and_wait(q);
+void PSRefProcTaskExecutor::execute(ProcessTask& process_task, uint ergo_workers) {
+ PSRefProcTask task(process_task, ergo_workers);
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
}
// This method contains all heap specific policy for invoking scavenge.
@@ -221,14 +293,96 @@
return full_gc_done;
}
-class PSAddThreadRootsTaskClosure : public ThreadClosure {
-private:
- GCTaskQueue* _q;
+class PSThreadRootsTaskClosure : public ThreadClosure {
+ uint _worker_id;
+public:
+ PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
+ virtual void do_thread(Thread* thread) {
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
+
+ PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
+ PSScavengeRootsClosure roots_closure(pm);
+ MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
+
+ thread->oops_do(&roots_closure, &roots_in_blobs);
+
+ // Do the real work
+ pm->drain_stacks(false);
+ }
+};
+
+class ScavengeRootsTask : public AbstractGangTask {
+ StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
+ SequentialSubTasksDone _subtasks;
+ PSOldGen* _old_gen;
+ HeapWord* _gen_top;
+ uint _active_workers;
+ bool _is_empty;
+ TaskTerminator _terminator;
public:
- PSAddThreadRootsTaskClosure(GCTaskQueue* q) : _q(q) { }
- void do_thread(Thread* t) {
- _q->enqueue(new ThreadRootsTask(t));
+ ScavengeRootsTask(PSOldGen* old_gen,
+ HeapWord* gen_top,
+ uint active_workers,
+ bool is_empty) :
+ AbstractGangTask("ScavengeRootsTask"),
+ _strong_roots_scope(active_workers),
+ _subtasks(),
+ _old_gen(old_gen),
+ _gen_top(gen_top),
+ _active_workers(active_workers),
+ _is_empty(is_empty),
+ _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
+ _subtasks.set_n_threads(active_workers);
+ _subtasks.set_n_tasks(ParallelRootType::sentinel);
+ }
+
+ virtual void work(uint worker_id) {
+ ResourceMark rm;
+
+ if (!_is_empty) {
+ // There are only old-to-young pointers if there are objects
+ // in the old gen.
+
+ assert(_old_gen != NULL, "Sanity");
+ // There are no old-to-young pointers if the old gen is empty.
+      assert(!_old_gen->object_space()->is_empty(), "Should not be called if there is no work");
+ assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
+ assert(worker_id < ParallelGCThreads, "Sanity");
+
+ {
+ PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
+ PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
+
+ card_table->scavenge_contents_parallel(_old_gen->start_array(),
+ _old_gen->object_space(),
+ _gen_top,
+ pm,
+ worker_id,
+ _active_workers);
+
+ // Do the real work
+ pm->drain_stacks(false);
+ }
+ }
+
+ for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
+ scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
+ }
+ _subtasks.all_tasks_completed();
+
+ PSThreadRootsTaskClosure closure(worker_id);
+ Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
+
+ // If active_workers can exceed 1, add a steal_work().
+ // PSPromotionManager::drain_stacks_depth() does not fully drain its
+ // stacks and expects a steal_work() to complete the draining if
+ // ParallelGCThreads is > 1.
+
+ if (_active_workers > 1) {
+      steal_work(*_terminator.terminator(), worker_id);
+ }
}
};
@@ -277,7 +431,7 @@
heap->print_heap_before_gc();
heap->trace_heap_before_gc(&_gc_tracer);
- assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
+ assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
// Fill in TLABs
@@ -339,15 +493,11 @@
// straying into the promotion labs.
HeapWord* old_top = old_gen->object_space()->top();
- // Release all previously held resources
- gc_task_manager()->release_all_resources();
-
- // Set the number of GC threads to be used in this collection
- gc_task_manager()->set_active_gang();
- gc_task_manager()->task_idle_workers();
- // Get the active number of workers here and use that value
- // throughout the methods.
- uint active_workers = gc_task_manager()->active_workers();
+ const uint active_workers =
+ WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
+ ParallelScavengeHeap::heap()->workers().active_workers(),
+ Threads::number_of_non_daemon_threads());
+ ParallelScavengeHeap::heap()->workers().update_active_workers(active_workers);
PSPromotionManager::pre_scavenge();
@@ -355,44 +505,9 @@
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
- ParallelScavengeHeap::ParStrongRootsScope psrs;
- GCTaskQueue* q = GCTaskQueue::create();
-
- if (!old_gen->object_space()->is_empty()) {
- // There are only old-to-young pointers if there are objects
- // in the old gen.
- uint stripe_total = active_workers;
- for(uint i=0; i < stripe_total; i++) {
- q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
- }
- }
-
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
- // We scan the thread roots in parallel
- PSAddThreadRootsTaskClosure cl(q);
- Threads::java_threads_and_vm_thread_do(&cl);
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
- q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
-
- TaskTerminator terminator(active_workers,
- (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
- // If active_workers can exceed 1, add a StrealTask.
- // PSPromotionManager::drain_stacks_depth() does not fully drain its
- // stacks and expects a StealTask to complete the draining if
- // ParallelGCThreads is > 1.
- if (gc_task_manager()->workers() > 1) {
- for (uint j = 0; j < active_workers; j++) {
- q->enqueue(new StealTask(terminator.terminator()));
- }
- }
-
- gc_task_manager()->execute_and_wait(q);
+ ScavengeRootsTask task(old_gen, old_top, active_workers, old_gen->object_space()->is_empty());
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
}
scavenge_midpoint.update();
@@ -603,8 +718,6 @@
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
heap->update_counters();
-
- gc_task_manager()->release_idle_workers();
}
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
@@ -620,7 +733,6 @@
log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
scavenge_entry.ticks(), scavenge_midpoint.ticks(),
scavenge_exit.ticks());
- gc_task_manager()->print_task_time_stamps();
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
@@ -698,13 +810,6 @@
return result;
}
- // Used to add tasks
-GCTaskManager* const PSScavenge::gc_task_manager() {
- assert(ParallelScavengeHeap::gc_task_manager() != NULL,
- "shouldn't return NULL");
- return ParallelScavengeHeap::gc_task_manager();
-}
-
// Adaptive size policy support. When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
@@ -718,8 +823,8 @@
// Arguments must have been parsed
if (AlwaysTenure || NeverTenure) {
- assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
- "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
+ assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
+ "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
_tenuring_threshold = MaxTenuringThreshold;
} else {
// We want to smooth out our startup times for the AdaptiveSizePolicy
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -33,8 +33,6 @@
#include "oops/oop.hpp"
#include "utilities/stack.hpp"
-class GCTaskManager;
-class GCTaskQueue;
class OopStack;
class ReferenceProcessor;
class ParallelScavengeHeap;
@@ -111,8 +109,6 @@
assert(_ref_processor != NULL, "Sanity");
return _ref_processor;
}
- // Used to add tasks
- static GCTaskManager* const gc_task_manager();
// The promotion managers tell us if they encountered overflow
static void set_survivor_overflow(bool state) {
_survivor_overflow = state;
--- a/src/hotspot/share/gc/parallel/psTasks.cpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psCardTable.hpp"
-#include "gc/parallel/psClosure.inline.hpp"
-#include "gc/parallel/psPromotionManager.hpp"
-#include "gc/parallel/psPromotionManager.inline.hpp"
-#include "gc/parallel/psScavenge.inline.hpp"
-#include "gc/parallel/psTasks.hpp"
-#include "gc/shared/scavengableNMethods.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "memory/iterator.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/management.hpp"
-
-//
-// ScavengeRootsTask
-//
-
-void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
- PSScavengeRootsClosure roots_closure(pm);
- PSPromoteRootsClosure roots_to_old_closure(pm);
-
- switch (_root_type) {
- case universe:
- Universe::oops_do(&roots_closure);
- break;
-
- case jni_handles:
- JNIHandles::oops_do(&roots_closure);
- break;
-
- case threads:
- {
- ResourceMark rm;
- Threads::oops_do(&roots_closure, NULL);
- }
- break;
-
- case object_synchronizer:
- ObjectSynchronizer::oops_do(&roots_closure);
- break;
-
- case system_dictionary:
- SystemDictionary::oops_do(&roots_closure);
- break;
-
- case class_loader_data:
- {
- PSScavengeCLDClosure cld_closure(pm);
- ClassLoaderDataGraph::cld_do(&cld_closure);
- }
- break;
-
- case management:
- Management::oops_do(&roots_closure);
- break;
-
- case jvmti:
- JvmtiExport::oops_do(&roots_closure);
- break;
-
- case code_cache:
- {
- MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
- ScavengableNMethods::nmethods_do(&code_closure);
- AOTLoader::oops_do(&roots_closure);
- }
- break;
-
- default:
- fatal("Unknown root type");
- }
-
- // Do the real work
- pm->drain_stacks(false);
-}
-
-//
-// ThreadRootsTask
-//
-
-void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
- PSScavengeRootsClosure roots_closure(pm);
- MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
-
- _thread->oops_do(&roots_closure, &roots_in_blobs);
-
- // Do the real work
- pm->drain_stacks(false);
-}
-
-//
-// StealTask
-//
-
-StealTask::StealTask(ParallelTaskTerminator* t) :
- _terminator(t) {}
-
-void StealTask::do_it(GCTaskManager* manager, uint which) {
- assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
-
- PSPromotionManager* pm =
- PSPromotionManager::gc_thread_promotion_manager(which);
- pm->drain_stacks(true);
- guarantee(pm->stacks_empty(),
- "stacks should be empty at this point");
-
- while(true) {
- StarTask p;
- if (PSPromotionManager::steal_depth(which, p)) {
- TASKQUEUE_STATS_ONLY(pm->record_steal(p));
- pm->process_popped_location_depth(p);
- pm->drain_stacks_depth(true);
- } else {
- if (terminator()->offer_termination()) {
- break;
- }
- }
- }
- guarantee(pm->stacks_empty(), "stacks should be empty at this point");
-}
-
-//
-// OldToYoungRootsTask
-//
-
-void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
- // There are not old-to-young pointers if the old gen is empty.
- assert(!_old_gen->object_space()->is_empty(),
- "Should not be called is there is no work");
- assert(_old_gen != NULL, "Sanity");
- assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
- assert(_stripe_number < ParallelGCThreads, "Sanity");
-
- {
- PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
- PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
-
- card_table->scavenge_contents_parallel(_old_gen->start_array(),
- _old_gen->object_space(),
- _gen_top,
- pm,
- _stripe_number,
- _stripe_total);
-
- // Do the real work
- pm->drain_stacks(false);
- }
-}
--- a/src/hotspot/share/gc/parallel/psTasks.hpp Mon Aug 19 20:31:10 2019 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSTASKS_HPP
-#define SHARE_GC_PARALLEL_PSTASKS_HPP
-
-#include "utilities/growableArray.hpp"
-
-//
-// psTasks.hpp is a collection of GCTasks used by the
-// parallelScavenge collector.
-//
-
-class GCTask;
-class OopClosure;
-class OopStack;
-class ObjectStartArray;
-class ParallelTaskTerminator;
-class MutableSpace;
-class PSOldGen;
-class Thread;
-class VMThread;
-
-//
-// ScavengeRootsTask
-//
-// This task scans all the roots of a given type.
-//
-//
-
-class ScavengeRootsTask : public GCTask {
- public:
- enum RootType {
- universe = 1,
- jni_handles = 2,
- threads = 3,
- object_synchronizer = 4,
- system_dictionary = 5,
- class_loader_data = 6,
- management = 7,
- jvmti = 8,
- code_cache = 9
- };
- private:
- RootType _root_type;
- public:
- ScavengeRootsTask(RootType value) : _root_type(value) {}
-
- char* name() { return (char *)"scavenge-roots-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// ThreadRootsTask
-//
-// This task scans the roots of a single thread. This task
-// enables scanning of thread roots in parallel.
-//
-
-class ThreadRootsTask : public GCTask {
- private:
- Thread* _thread;
-
- public:
- ThreadRootsTask(Thread* root) : _thread(root) {}
-
- char* name() { return (char *)"thread-roots-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// StealTask
-//
-// This task is used to distribute work to idle threads.
-//
-
-class StealTask : public GCTask {
- private:
- ParallelTaskTerminator* const _terminator;
- public:
- char* name() { return (char *)"steal-task"; }
-
- StealTask(ParallelTaskTerminator* t);
-
- ParallelTaskTerminator* terminator() { return _terminator; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
-// OldToYoungRootsTask
-//
-// This task is used to scan old to young roots in parallel
-//
-// A GC thread executing this tasks divides the generation (old gen)
-// into slices and takes a stripe in the slice as its part of the
-// work.
-//
-// +===============+ slice 0
-// | stripe 0 |
-// +---------------+
-// | stripe 1 |
-// +---------------+
-// | stripe 2 |
-// +---------------+
-// | stripe 3 |
-// +===============+ slice 1
-// | stripe 0 |
-// +---------------+
-// | stripe 1 |
-// +---------------+
-// | stripe 2 |
-// +---------------+
-// | stripe 3 |
-// +===============+ slice 2
-// ...
-//
-// A task is created for each stripe. In this case there are 4 tasks
-// created. A GC thread first works on its stripe within slice 0
-// and then moves to its stripe in the next slice until all stripes
-// exceed the top of the generation. Note that having fewer GC threads
-// than stripes works because all the tasks are executed so all stripes
-// will be covered. In this example if 4 tasks have been created to cover
-// all the stripes and there are only 3 threads, one of the threads will
-// get the tasks with the 4th stripe. However, there is a dependence in
-// PSCardTable::scavenge_contents_parallel() on the number
-// of tasks created. In scavenge_contents_parallel the distance
-// to the next stripe is calculated based on the number of tasks.
-// If the stripe width is ssize, a task's next stripe is at
-// ssize * number_of_tasks (= slice_stride). In this case after
-// finishing stripe 0 in slice 0, the thread finds the stripe 0 in slice 1
-// by adding slice_stride to the start of stripe 0 in slice 0 to get
-// to the start of stripe 0 in slice 1.
-
-class OldToYoungRootsTask : public GCTask {
- private:
- PSOldGen* _old_gen;
- HeapWord* _gen_top;
- uint _stripe_number;
- uint _stripe_total;
-
- public:
- OldToYoungRootsTask(PSOldGen *old_gen,
- HeapWord* gen_top,
- uint stripe_number,
- uint stripe_total) :
- _old_gen(old_gen),
- _gen_top(gen_top),
- _stripe_number(stripe_number),
- _stripe_total(stripe_total) { }
-
- char* name() { return (char *)"old-to-young-roots-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-#endif // SHARE_GC_PARALLEL_PSTASKS_HPP
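The comment block removed above describes the striped old-to-young scan: the old generation is cut into slices, each slice into one stripe per task, and a worker advances from its stripe in one slice to the same stripe in the next by slice_stride = stripe_size * number_of_tasks. A minimal standalone sketch of that arithmetic follows; all names and sizes are purely illustrative, not HotSpot code.

// Sketch of the stripe/slice iteration described by the removed comment.
#include <cstdio>
#include <cstddef>

static void scan_stripe(size_t begin, size_t end) {
  std::printf("scan [%zu, %zu)\n", begin, end);
}

static void scan_old_gen_stripes(size_t gen_words, size_t stripe_words,
                                 unsigned stripe_number, unsigned stripe_total) {
  // Distance from a stripe to the same stripe in the next slice.
  const size_t slice_stride = stripe_words * stripe_total;
  for (size_t start = stripe_number * stripe_words; start < gen_words;
       start += slice_stride) {
    size_t end = start + stripe_words;
    if (end > gen_words) end = gen_words;   // last stripe may be partial
    scan_stripe(start, end);
  }
}

int main() {
  // 4 stripes covering a 1000-word generation with 128-word stripes.
  for (unsigned i = 0; i < 4; i++) {
    scan_old_gen_stripes(1000, 128, i, 4);
  }
}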
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -749,7 +749,7 @@
void PSYoungGen::compact() {
eden_mark_sweep()->compact(ZapUnusedHeapArea);
from_mark_sweep()->compact(ZapUnusedHeapArea);
- // Mark sweep stores preserved markOops in to space, don't disturb!
+ // Mark sweep stores preserved markWords in to space, don't disturb!
to_mark_sweep()->compact(false);
}
--- a/src/hotspot/share/gc/serial/markSweep.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/serial/markSweep.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -49,7 +49,7 @@
Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack;
Stack<oop, mtGC> MarkSweep::_preserved_oop_stack;
-Stack<markOop, mtGC> MarkSweep::_preserved_mark_stack;
+Stack<markWord, mtGC> MarkSweep::_preserved_mark_stack;
size_t MarkSweep::_preserved_count = 0;
size_t MarkSweep::_preserved_count_max = 0;
PreservedMark* MarkSweep::_preserved_marks = NULL;
@@ -132,7 +132,7 @@
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
- if (!obj->mark_raw()->is_marked()) {
+ if (!obj->mark_raw().is_marked()) {
mark_object(obj);
follow_object(obj);
}
@@ -152,9 +152,9 @@
}
// We preserve the mark which should be replaced at the end and the location
-// that it will go. Note that the object that this markOop belongs to isn't
+// that it will go. Note that the object that this markWord belongs to isn't
// currently at that address but it will be after phase4
-void MarkSweep::preserve_mark(oop obj, markOop mark) {
+void MarkSweep::preserve_mark(oop obj, markWord mark) {
// We try to store preserved marks in the to space of the new generation since
// this is storage which should be available. Most of the time this should be
// sufficient space for the marks we need to preserve but if it isn't we fall
@@ -204,7 +204,7 @@
// deal with the overflow
while (!_preserved_oop_stack.is_empty()) {
oop obj = _preserved_oop_stack.pop();
- markOop mark = _preserved_mark_stack.pop();
+ markWord mark = _preserved_mark_stack.pop();
obj->set_mark_raw(mark);
}
}
--- a/src/hotspot/share/gc/serial/markSweep.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/serial/markSweep.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -100,7 +100,7 @@
static Stack<ObjArrayTask, mtGC> _objarray_stack;
// Space for storing/restoring mark word
- static Stack<markOop, mtGC> _preserved_mark_stack;
+ static Stack<markWord, mtGC> _preserved_mark_stack;
static Stack<oop, mtGC> _preserved_oop_stack;
static size_t _preserved_count;
static size_t _preserved_count_max;
@@ -137,7 +137,7 @@
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }
- static void preserve_mark(oop p, markOop mark);
+ static void preserve_mark(oop p, markWord mark);
// Save the mark word so it can be restored later
static void adjust_marks(); // Adjust the pointers in the preserved marks table
static void restore_marks(); // Restore the marks that we saved in preserve_mark
@@ -199,10 +199,10 @@
class PreservedMark {
private:
oop _obj;
- markOop _mark;
+ markWord _mark;
public:
- void init(oop obj, markOop mark) {
+ void init(oop obj, markWord mark) {
_obj = obj;
_mark = mark;
}
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -37,10 +37,10 @@
inline void MarkSweep::mark_object(oop obj) {
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of markSweep.
- markOop mark = obj->mark_raw();
- obj->set_mark_raw(markOopDesc::prototype()->set_marked());
+ markWord mark = obj->mark_raw();
+ obj->set_mark_raw(markWord::prototype().set_marked());
- if (mark->must_be_preserved(obj)) {
+ if (mark.must_be_preserved(obj)) {
preserve_mark(obj, mark);
}
}
@@ -49,7 +49,7 @@
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
- if (!obj->mark_raw()->is_marked()) {
+ if (!obj->mark_raw().is_marked()) {
mark_object(obj);
_marking_stack.push(obj);
}
@@ -78,11 +78,11 @@
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");
- oop new_obj = oop(obj->mark_raw()->decode_pointer());
+ oop new_obj = oop(obj->mark_raw().decode_pointer());
- assert(new_obj != NULL || // is forwarding ptr?
- obj->mark_raw() == markOopDesc::prototype() || // not gc marked?
- (UseBiasedLocking && obj->mark_raw()->has_bias_pattern()),
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->mark_raw() == markWord::prototype() || // not gc marked?
+ (UseBiasedLocking && obj->mark_raw().has_bias_pattern()),
// not gc marked?
"should be forwarded");
--- a/src/hotspot/share/gc/shared/ageTable.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/ageTable.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -78,8 +78,8 @@
uint result;
if (AlwaysTenure || NeverTenure) {
- assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
- "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
+ assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
+ "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
result = MaxTenuringThreshold;
} else {
size_t total = 0;
--- a/src/hotspot/share/gc/shared/ageTable.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/ageTable.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -41,7 +41,7 @@
public:
// constants
- enum { table_size = markOopDesc::max_age + 1 };
+ enum { table_size = markWord::max_age + 1 };
// instance variables
size_t sizes[table_size];
--- a/src/hotspot/share/gc/shared/gc_globals.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -680,12 +680,6 @@
product(bool, DisableExplicitGC, false, \
"Ignore calls to System.gc()") \
\
- product(bool, BindGCTaskThreadsToCPUs, false, \
- "Bind GCTaskThreads to CPUs if possible") \
- \
- product(bool, UseGCTaskAffinity, false, \
- "Use worker affinity when asking for GCTasks") \
- \
product(bool, PrintGC, false, \
"Print message at garbage collection. " \
"Deprecated, use -Xlog:gc instead.") \
@@ -817,12 +811,12 @@
\
product(uintx, MaxTenuringThreshold, 15, \
"Maximum value for tenuring threshold") \
- range(0, markOopDesc::max_age + 1) \
+ range(0, markWord::max_age + 1) \
constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, InitialTenuringThreshold, 7, \
"Initial value for tenuring threshold") \
- range(0, markOopDesc::max_age + 1) \
+ range(0, markWord::max_age + 1) \
constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, TargetSurvivorRatio, 50, \
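For context on the range(0, markWord::max_age + 1) bounds above: with the usual 4-bit age field, max_age is 15, and the ageTable.cpp hunk earlier in this change treats max_age + 1 (16) as the NeverTenure setting. A one-line check of that arithmetic, with the 4-bit field width stated as an assumption:

#include <cstdio>
int main() {
  const unsigned age_bits = 4;                    // assumption: 4 age bits in the mark word
  const unsigned max_age  = (1u << age_bits) - 1; // 15
  std::printf("range(0, max_age + 1) == range(0, %u)\n", max_age + 1);
}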
--- a/src/hotspot/share/gc/shared/generation.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/generation.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -68,6 +68,12 @@
return gch->old_gen_spec()->init_size();
}
+// This is for CMS. It returns stable monotonic used space size.
+// Remove this when CMS is removed.
+size_t Generation::used_stable() const {
+ return used();
+}
+
size_t Generation::max_capacity() const {
return reserved().byte_size();
}
@@ -178,7 +184,7 @@
}
oop Generation::par_promote(int thread_num,
- oop obj, markOop m, size_t word_sz) {
+ oop obj, markWord m, size_t word_sz) {
// Could do a bad general impl here that gets a lock. But no.
ShouldNotCallThis();
return NULL;
--- a/src/hotspot/share/gc/shared/generation.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/generation.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -156,6 +156,7 @@
virtual size_t capacity() const = 0; // The maximum number of object bytes the
// generation can currently hold.
virtual size_t used() const = 0; // The number of used bytes in the gen.
+ virtual size_t used_stable() const; // The number of used bytes for memory monitoring tools.
virtual size_t free() const = 0; // The number of free bytes in the gen.
// Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
@@ -300,7 +301,7 @@
// word of "obj" may have been overwritten with a forwarding pointer, and
// also taking care to copy the klass pointer *last*. Returns the new
// object if successful, or else NULL.
- virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
+ virtual oop par_promote(int thread_num, oop obj, markWord m, size_t word_sz);
// Informs the current generation that all par_promote_alloc's in the
// collection have been completed; any supporting data structures can be
--- a/src/hotspot/share/gc/shared/memAllocator.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -143,7 +143,6 @@
// Clear unhandled oops for memory allocation. Memory allocation might
// not take out a lock if from tlab, so clear here.
Thread* THREAD = _thread;
- CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
@@ -388,7 +387,7 @@
oopDesc::set_mark_raw(mem, _klass->prototype_header());
} else {
// May be bootstrapping
- oopDesc::set_mark_raw(mem, markOopDesc::prototype());
+ oopDesc::set_mark_raw(mem, markWord::prototype());
}
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -32,16 +32,16 @@
void PreservedMarks::restore() {
while (!_stack.is_empty()) {
- const OopAndMarkOop elem = _stack.pop();
+ const OopAndMarkWord elem = _stack.pop();
elem.set_mark();
}
assert_empty();
}
void PreservedMarks::adjust_during_full_gc() {
- StackIterator<OopAndMarkOop, mtGC> iter(_stack);
+ StackIterator<OopAndMarkWord, mtGC> iter(_stack);
while (!iter.is_empty()) {
- OopAndMarkOop* elem = iter.next_addr();
+ OopAndMarkWord* elem = iter.next_addr();
oop obj = elem->get_oop();
if (obj->is_forwarded()) {
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -35,28 +35,28 @@
class PreservedMarks {
private:
- class OopAndMarkOop {
+ class OopAndMarkWord {
private:
oop _o;
- markOop _m;
+ markWord _m;
public:
- OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) { }
+ OopAndMarkWord(oop obj, markWord m) : _o(obj), _m(m) { }
oop get_oop() { return _o; }
inline void set_mark() const;
void set_oop(oop obj) { _o = obj; }
};
- typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
+ typedef Stack<OopAndMarkWord, mtGC> OopAndMarkWordStack;
- OopAndMarkOopStack _stack;
+ OopAndMarkWordStack _stack;
- inline bool should_preserve_mark(oop obj, markOop m) const;
+ inline bool should_preserve_mark(oop obj, markWord m) const;
public:
size_t size() const { return _stack.size(); }
- inline void push(oop obj, markOop m);
- inline void push_if_necessary(oop obj, markOop m);
+ inline void push(oop obj, markWord m);
+ inline void push_if_necessary(oop obj, markWord m);
// Iterate over the stack, restore all preserved marks, and
// reclaim the memory taken up by the stack segments.
void restore();
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -30,17 +30,17 @@
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
-inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
- return m->must_be_preserved_for_promotion_failure(obj);
+inline bool PreservedMarks::should_preserve_mark(oop obj, markWord m) const {
+ return m.must_be_preserved_for_promotion_failure(obj);
}
-inline void PreservedMarks::push(oop obj, markOop m) {
+inline void PreservedMarks::push(oop obj, markWord m) {
assert(should_preserve_mark(obj, m), "pre-condition");
- OopAndMarkOop elem(obj, m);
+ OopAndMarkWord elem(obj, m);
_stack.push(elem);
}
-inline void PreservedMarks::push_if_necessary(oop obj, markOop m) {
+inline void PreservedMarks::push_if_necessary(oop obj, markWord m) {
if (should_preserve_mark(obj, m)) {
push(obj, m);
}
@@ -72,14 +72,14 @@
}
inline PreservedMarks::PreservedMarks()
- : _stack(OopAndMarkOopStack::default_segment_size(),
+ : _stack(OopAndMarkWordStack::default_segment_size(),
// This stack should be used very infrequently so there's
// no point in caching stack segments (there will be a
// waste of space most of the time). So we set the max
// cache size to 0.
0 /* max_cache_size */) { }
-void PreservedMarks::OopAndMarkOop::set_mark() const {
+void PreservedMarks::OopAndMarkWord::set_mark() const {
_o->set_mark_raw(_m);
}
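The PreservedMarks hunks above only rename OopAndMarkOop to OopAndMarkWord; the flow itself is unchanged: push_if_necessary saves a mark only when it must be preserved across a promotion failure, and restore pops the stack and writes the saved marks back. A standalone sketch of that flow with illustrative stand-in types (not the HotSpot classes):

// Sketch of the save-then-restore cycle for interesting mark words.
#include <cstdint>
#include <vector>

struct FakeObject { uintptr_t mark; };

class PreservedMarksModel {
  struct OopAndMarkWord { FakeObject* obj; uintptr_t mark; };
  std::vector<OopAndMarkWord> _stack;

  static bool must_be_preserved(uintptr_t mark) {
    // Assumption for the sketch: anything other than the neutral prototype
    // value (e.g. a lock state or an installed hash) needs saving.
    return mark != 0x1;
  }
 public:
  void push_if_necessary(FakeObject* obj, uintptr_t mark) {
    if (must_be_preserved(mark)) {
      _stack.push_back(OopAndMarkWord{obj, mark});
    }
  }
  void restore() {
    while (!_stack.empty()) {
      OopAndMarkWord e = _stack.back();
      _stack.pop_back();
      e.obj->mark = e.mark;     // put the original mark back
    }
  }
};

int main() {
  FakeObject a{0x1}, b{0xabcd0005};
  PreservedMarksModel pm;
  pm.push_if_necessary(&a, a.mark);   // neutral mark: not saved
  pm.push_if_necessary(&b, b.mark);   // interesting mark: saved
  a.mark = 0x3; b.mark = 0x3;         // GC scribbles marked bits over both
  pm.restore();                       // b gets its original mark back
  return b.mark == 0xabcd0005 ? 0 : 1;
}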
--- a/src/hotspot/share/gc/shared/space.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/space.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -651,14 +651,14 @@
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) allocate(size);
assert(t != NULL, "allocation should succeed");
- t->set_mark_raw(markOopDesc::prototype());
+ t->set_mark_raw(markWord::prototype());
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
- obj->set_mark_raw(markOopDesc::prototype());
+ obj->set_mark_raw(markWord::prototype());
obj->set_klass_gap(0);
obj->set_klass(SystemDictionary::Object_klass());
}
--- a/src/hotspot/share/gc/shared/space.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shared/space.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -117,7 +117,7 @@
_allowed_deadspace_words -= dead_length;
CollectedHeap::fill_with_object(dead_start, dead_length);
oop obj = oop(dead_start);
- obj->set_mark_raw(obj->mark_raw()->set_marked());
+ obj->set_mark_raw(obj->mark_raw().set_marked());
assert(dead_length == (size_t)obj->size(), "bad filler object size");
log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
@@ -164,8 +164,8 @@
while (cur_obj < scan_limit) {
assert(!space->scanned_block_is_obj(cur_obj) ||
- oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
- oop(cur_obj)->mark_raw()->has_bias_pattern(),
+ oop(cur_obj)->mark_raw().is_marked() || oop(cur_obj)->mark_raw().is_unlocked() ||
+ oop(cur_obj)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
// prefetch beyond cur_obj
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1458,9 +1458,9 @@
phase->register_new_node(markword, ctrl);
// Test if object is forwarded. This is the case if lowest two bits are set.
- Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
+ Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markWord::lock_mask_in_place));
phase->register_new_node(masked, ctrl);
- Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
+ Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markWord::marked_value));
phase->register_new_node(cmp, ctrl);
// Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -61,7 +61,7 @@
r->print_on(&ss);
stringStream mw_ss;
- obj->mark()->print_on(&mw_ss);
+ obj->mark().print_on(&mw_ss);
ShenandoahMarkingContext* const ctx = heap->marking_context();
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -35,9 +35,9 @@
}
inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
- markOop mark = obj->mark_raw();
- if (mark->is_marked()) {
- return (HeapWord*) mark->clear_lock_bits();
+ markWord mark = obj->mark_raw();
+ if (mark.is_marked()) {
+ return (HeapWord*) mark.clear_lock_bits().to_pointer();
} else {
return (HeapWord*) obj;
}
@@ -49,21 +49,21 @@
}
inline bool ShenandoahForwarding::is_forwarded(oop obj) {
- return obj->mark_raw()->is_marked();
+ return obj->mark_raw().is_marked();
}
inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
- markOop old_mark = obj->mark_raw();
- if (old_mark->is_marked()) {
- return (oop) old_mark->clear_lock_bits();
+ markWord old_mark = obj->mark_raw();
+ if (old_mark.is_marked()) {
+ return oop(old_mark.clear_lock_bits().to_pointer());
}
- markOop new_mark = markOopDesc::encode_pointer_as_mark(update);
- markOop prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
+ markWord new_mark = markWord::encode_pointer_as_mark(update);
+ markWord prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
if (prev_mark == old_mark) {
return update;
} else {
- return (oop) prev_mark->clear_lock_bits();
+ return oop(prev_mark.clear_lock_bits().to_pointer());
}
}
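ShenandoahForwarding, as converted above, keeps using the mark word to publish forwarding pointers: a forwarded object carries the marked lock pattern in the low bits and the forwardee address in the rest of the word, installed with a CAS. A standalone model of that encode/CAS/decode cycle follows; the constants and types are illustrative, not the Shenandoah code itself.

// Model of forwarding pointers encoded into the mark word.
#include <atomic>
#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask    = 0x3;
constexpr uintptr_t marked_value = 0x3;

struct Obj { std::atomic<uintptr_t> mark{0x1 /* unlocked prototype */}; };

static uintptr_t encode_pointer_as_mark(Obj* p) {
  return reinterpret_cast<uintptr_t>(p) | marked_value;
}

static Obj* forwardee_or_self(Obj* obj) {
  uintptr_t m = obj->mark.load();
  if ((m & lock_mask) == marked_value) {            // "is_marked" == forwarded
    return reinterpret_cast<Obj*>(m & ~lock_mask);  // clear_lock_bits
  }
  return obj;
}

static Obj* try_update_forwardee(Obj* obj, Obj* update) {
  uintptr_t old_mark = obj->mark.load();
  if ((old_mark & lock_mask) == marked_value) {
    return reinterpret_cast<Obj*>(old_mark & ~lock_mask);  // already forwarded
  }
  uintptr_t new_mark = encode_pointer_as_mark(update);
  if (obj->mark.compare_exchange_strong(old_mark, new_mark)) {
    return update;                                         // we installed it
  }
  return reinterpret_cast<Obj*>(old_mark & ~lock_mask);    // a racer won
}

int main() {
  Obj from, to;
  assert(forwardee_or_self(&from) == &from);
  assert(try_update_forwardee(&from, &to) == &to);
  assert(forwardee_or_self(&from) == &to);
}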
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -105,7 +105,7 @@
out->cr();
out->print_cr("GC STATISTICS:");
out->print_cr(" \"(G)\" (gross) pauses include VM time: time to notify and block threads, do the pre-");
- out->print_cr(" and post-safepoint housekeeping. Use -XX:+PrintSafepointStatistics to dissect.");
+ out->print_cr(" and post-safepoint housekeeping. Use -Xlog:safepoint+stats to dissect.");
out->print_cr(" \"(N)\" (net) pauses are the times spent in the actual GC code.");
out->print_cr(" \"a\" is average time for each phase, look at levels to see if average makes sense.");
out->print_cr(" \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
--- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -47,17 +47,17 @@
"Only from a GC worker thread");
if (java_string->age() <= StringDeduplicationAgeThreshold) {
- const markOop mark = java_string->mark();
+ const markWord mark = java_string->mark();
// Has or had a displaced header; too risky to deal with, skip
- if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
+ if (mark == markWord::INFLATING() || mark.has_displaced_mark_helper()) {
return;
}
// Increase string age and enqueue it when it reaches the age threshold
- markOop new_mark = mark->incr_age();
+ markWord new_mark = mark.incr_age();
if (mark == java_string->cas_set_mark(new_mark, mark)) {
- if (mark->age() == StringDeduplicationAgeThreshold) {
+ if (mark.age() == StringDeduplicationAgeThreshold) {
StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
}
}
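The string-deduplication hunk above bumps the age field in the mark word with a CAS and enqueues the string exactly once, when the pre-increment age equals the threshold. A standalone sketch of that pattern, with the age-field layout stated as an assumption:

// Sketch of CAS-based aging with a one-shot enqueue at the threshold.
#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t age_shift = 3;              // assumption: 4-bit age field at bit 3
constexpr uintptr_t age_mask  = 0xF << age_shift;

static unsigned  age_of(uintptr_t m)   { return (m & age_mask) >> age_shift; }
static uintptr_t incr_age(uintptr_t m) {
  return age_of(m) == 15 ? m : (m & ~age_mask) | ((age_of(m) + 1) << age_shift);
}

int main() {
  const unsigned threshold = 3;          // stand-in for StringDeduplicationAgeThreshold
  std::atomic<uintptr_t> mark{0x1};      // unlocked, age 0

  for (int gc = 0; gc < 5; gc++) {
    uintptr_t m = mark.load();
    if (age_of(m) > threshold) break;    // too old, leave it alone
    uintptr_t next = incr_age(m);
    if (mark.compare_exchange_strong(m, next) && age_of(m) == threshold) {
      std::printf("enqueue for dedup at age %u\n", age_of(m));  // happens once
    }
  }
}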
--- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -34,6 +34,7 @@
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "utilities/debug.hpp"
ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahGCPhase::_invalid_phase;
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -666,17 +666,17 @@
BasicObjectLock* mon = &istate->monitor_base()[-1];
mon->set_obj(rcvr);
bool success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
- markOop mark = rcvr->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+ markWord mark = rcvr->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// Implies UseBiasedLocking.
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)rcvr->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// Already biased towards this thread, nothing to do.
@@ -684,11 +684,11 @@
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
- } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ } else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// Try to revoke bias.
- markOop header = rcvr->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = rcvr->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -696,9 +696,9 @@
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
// Try to rebias.
- markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) rcvr->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -710,15 +710,15 @@
success = true;
} else {
// Try to bias towards thread in case object is anonymously biased.
- markOop header = (markOop) ((uintptr_t) mark &
- ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() &
+ ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// Debugging hint.
- DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(mon->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (rcvr->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -732,13 +732,13 @@
// Traditional lightweight locking.
if (!success) {
- markOop displaced = rcvr->mark()->set_unlocked();
+ markWord displaced = rcvr->mark().set_unlocked();
mon->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || rcvr->cas_set_mark((markOop)mon, displaced) != displaced) {
+ if (call_vm || rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- mon->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ mon->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
@@ -851,18 +851,18 @@
assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
entry->set_obj(lockee);
bool success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
-
- markOop mark = lockee->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+
+ markWord mark = lockee->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@@ -870,11 +870,11 @@
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
- } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ } else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
- markOop header = lockee->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = lockee->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -883,9 +883,9 @@
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
- markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -897,14 +897,14 @@
success = true;
} else {
// try to bias towards thread in case object is anonymously biased
- markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// debugging hint
- DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -918,13 +918,13 @@
// traditional lightweight locking
if (!success) {
- markOop displaced = lockee->mark()->set_unlocked();
+ markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
+ if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- entry->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@@ -1791,18 +1791,18 @@
if (entry != NULL) {
entry->set_obj(lockee);
int success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
-
- markOop mark = lockee->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+
+ markWord mark = lockee->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@@ -1811,11 +1811,11 @@
}
success = true;
}
- else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
- markOop header = lockee->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = lockee->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -1824,9 +1824,9 @@
}
else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
- markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -1839,15 +1839,15 @@
}
else {
// try to bias towards thread in case object is anonymously biased
- markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place |
- epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place |
+ epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// debugging hint
- DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics)
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -1861,13 +1861,13 @@
// traditional lightweight locking
if (!success) {
- markOop displaced = lockee->mark()->set_unlocked();
+ markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
+ if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- entry->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@@ -1890,13 +1890,13 @@
while (most_recent != limit ) {
if ((most_recent)->obj() == lockee) {
BasicLock* lock = most_recent->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
most_recent->set_obj(NULL);
- if (!lockee->mark()->has_bias_pattern()) {
+ if (!lockee->mark().has_bias_pattern()) {
bool call_vm = UseHeavyMonitors;
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL || call_vm) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL || call_vm) {
+ markWord old_header = markWord::encode(lock);
if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
most_recent->set_obj(lockee);
@@ -2182,7 +2182,7 @@
if (UseBiasedLocking) {
result->set_mark(ik->prototype_header());
} else {
- result->set_mark(markOopDesc::prototype());
+ result->set_mark(markWord::prototype());
}
result->set_klass_gap(0);
result->set_klass(ik);
@@ -3035,13 +3035,13 @@
oop lockee = end->obj();
if (lockee != NULL) {
BasicLock* lock = end->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
end->set_obj(NULL);
- if (!lockee->mark()->has_bias_pattern()) {
+ if (!lockee->mark().has_bias_pattern()) {
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
end->set_obj(lockee);
@@ -3110,14 +3110,14 @@
}
} else {
BasicLock* lock = base->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
base->set_obj(NULL);
- if (!rcvr->mark()->has_bias_pattern()) {
+ if (!rcvr->mark().has_bias_pattern()) {
base->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
base->set_obj(rcvr);
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1440,7 +1440,7 @@
method->set_signature_handler(_handlers->at(handler_index));
}
} else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ DEBUG_ONLY(Thread::current()->check_possible_safepoint());
// use generic signature handler
method->set_signature_handler(Interpreter::slow_signature_handler());
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -131,7 +131,7 @@
if (!_mark_bits->is_marked(pointee)) {
_mark_bits->mark_obj(pointee);
// is the pointee a sample object?
- if (NULL == pointee->mark()) {
+ if (NULL == pointee->mark().to_pointer()) {
add_chain(reference, pointee);
}
@@ -148,7 +148,7 @@
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
assert(pointee != NULL, "invariant");
- assert(NULL == pointee->mark(), "invariant");
+ assert(NULL == pointee->mark().to_pointer(), "invariant");
Edge leak_edge(_current_parent, reference);
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -121,7 +121,7 @@
assert(_mark_bits->is_marked(pointee), "invariant");
// is the pointee a sample object?
- if (NULL == pointee->mark()) {
+ if (NULL == pointee->mark().to_pointer()) {
add_chain();
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -233,8 +233,8 @@
StoredEdge* const leak_context_edge = put(edge->reference());
oop sample_object = edge->pointee();
assert(sample_object != NULL, "invariant");
- assert(NULL == sample_object->mark(), "invariant");
- sample_object->set_mark(markOop(leak_context_edge));
+ assert(NULL == sample_object->mark().to_pointer(), "invariant");
+ sample_object->set_mark(markWord::from_pointer(leak_context_edge));
return leak_context_edge;
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -35,7 +35,7 @@
#include "runtime/handles.inline.hpp"
bool EdgeUtils::is_leak_edge(const Edge& edge) {
- return (const Edge*)edge.pointee()->mark() == &edge;
+ return (const Edge*)edge.pointee()->mark().to_pointer() == &edge;
}
static int field_offset(const StoredEdge& edge) {
--- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -36,45 +36,45 @@
//
class ObjectSampleMarker : public StackObj {
private:
- class ObjectSampleMarkOop : public ResourceObj {
+ class ObjectSampleMarkWord : public ResourceObj {
friend class ObjectSampleMarker;
private:
oop _obj;
- markOop _mark_oop;
- ObjectSampleMarkOop(const oop obj,
- const markOop mark_oop) : _obj(obj),
- _mark_oop(mark_oop) {}
+ markWord _mark_word;
+ ObjectSampleMarkWord(const oop obj,
+ const markWord mark_word) : _obj(obj),
+ _mark_word(mark_word) {}
public:
- ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
+ ObjectSampleMarkWord() : _obj(NULL), _mark_word(markWord::zero()) {}
};
- GrowableArray<ObjectSampleMarkOop>* _store;
+ GrowableArray<ObjectSampleMarkWord>* _store;
public:
ObjectSampleMarker() :
- _store(new GrowableArray<ObjectSampleMarkOop>(16)) {}
+ _store(new GrowableArray<ObjectSampleMarkWord>(16)) {}
~ObjectSampleMarker() {
assert(_store != NULL, "invariant");
- // restore the saved, original, markOop for sample objects
+ // restore the saved, original, markWord for sample objects
while (_store->is_nonempty()) {
- ObjectSampleMarkOop sample_oop = _store->pop();
- sample_oop._obj->set_mark(sample_oop._mark_oop);
- assert(sample_oop._obj->mark() == sample_oop._mark_oop, "invariant");
+ ObjectSampleMarkWord sample_oop = _store->pop();
+ sample_oop._obj->set_mark(sample_oop._mark_word);
+ assert(sample_oop._obj->mark() == sample_oop._mark_word, "invariant");
}
}
void mark(oop obj) {
assert(obj != NULL, "invariant");
- // save the original markOop
- _store->push(ObjectSampleMarkOop(obj, obj->mark()));
+ // save the original markWord
+ _store->push(ObjectSampleMarkWord(obj, obj->mark()));
// now we will "poison" the mark word of the sample object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify sample objects
// during the reachability search from gc roots.
- assert(NULL == markOopDesc::INFLATING(), "invariant");
- obj->set_mark(markOopDesc::INFLATING());
- assert(NULL == obj->mark(), "invariant");
+ assert(NULL == markWord::INFLATING().to_pointer(), "invariant");
+ obj->set_mark(markWord::INFLATING());
+ assert(NULL == obj->mark().to_pointer(), "invariant");
}
};
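ObjectSampleMarker relies on the fact, asserted above, that INFLATING() converts to a NULL pointer: saving each sample object's mark and overwriting it with the all-zero INFLATING value gives the leak profiler a cheap "is this a sample?" test during the root walk, with the saved marks restored in the destructor. A standalone sketch of that save/poison/restore cycle with illustrative types:

// Sketch of poisoning mark words to zero ("INFLATING") and restoring them.
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct FakeObject { uintptr_t mark = 0x1; };        // unlocked prototype

class ObjectSampleMarkerModel {
  std::vector<std::pair<FakeObject*, uintptr_t>> _store;
 public:
  void mark(FakeObject* obj) {
    _store.emplace_back(obj, obj->mark);  // save the original mark word
    obj->mark = 0;                        // zero mark: impossible at a safepoint
  }
  ~ObjectSampleMarkerModel() {
    for (auto it = _store.rbegin(); it != _store.rend(); ++it) {
      it->first->mark = it->second;       // restore on destruction
    }
  }
};

static bool is_sample_object(const FakeObject* obj) { return obj->mark == 0; }

int main() {
  FakeObject sampled, other;
  {
    ObjectSampleMarkerModel marker;
    marker.mark(&sampled);
    assert(is_sample_object(&sampled) && !is_sample_object(&other));
  }                                       // marker destroyed: marks restored
  assert(sampled.mark == 0x1);
}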
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -111,7 +111,7 @@
traceid gc_root_id = 0;
const Edge* edge = NULL;
if (SafepointSynchronize::is_at_safepoint()) {
- edge = (const Edge*)(*object_addr)->mark();
+ edge = (const Edge*)(*object_addr)->mark().to_pointer();
}
if (edge == NULL) {
// In order to dump out a representation of the event
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -27,42 +27,42 @@
#include "jfr/leakprofiler/utilities/saveRestore.hpp"
#include "oops/oop.inline.hpp"
-MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {}
+MarkWordContext::MarkWordContext() : _obj(NULL), _mark_word(markWord::zero()) {}
-MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) {
- assert(_obj->mark() == _mark_oop, "invariant");
+MarkWordContext::MarkWordContext(const oop obj) : _obj(obj), _mark_word(obj->mark()) {
+ assert(_obj->mark() == _mark_word, "invariant");
// now we will "poison" the mark word of the object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify objects
// during the reachability search from gc roots.
- assert(NULL == markOopDesc::INFLATING(), "invariant");
- _obj->set_mark(markOopDesc::INFLATING());
- assert(NULL == obj->mark(), "invariant");
+ assert(markWord::zero() == markWord::INFLATING(), "invariant");
+ _obj->set_mark(markWord::INFLATING());
+ assert(markWord::zero() == obj->mark(), "invariant");
}
-MarkOopContext::~MarkOopContext() {
+MarkWordContext::~MarkWordContext() {
if (_obj != NULL) {
- _obj->set_mark(_mark_oop);
- assert(_obj->mark() == _mark_oop, "invariant");
+ _obj->set_mark(_mark_word);
+ assert(_obj->mark() == _mark_word, "invariant");
}
}
-MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) {
- swap(const_cast<MarkOopContext&>(rhs));
+MarkWordContext::MarkWordContext(const MarkWordContext& rhs) : _obj(NULL), _mark_word(markWord::zero()) {
+ swap(const_cast<MarkWordContext&>(rhs));
}
-void MarkOopContext::operator=(MarkOopContext rhs) {
+void MarkWordContext::operator=(MarkWordContext rhs) {
swap(rhs);
}
-void MarkOopContext::swap(MarkOopContext& rhs) {
+void MarkWordContext::swap(MarkWordContext& rhs) {
oop temp_obj = rhs._obj;
- markOop temp_mark_oop = rhs._mark_oop;
+ markWord temp_mark_word = rhs._mark_word;
rhs._obj = _obj;
- rhs._mark_oop = _mark_oop;
+ rhs._mark_word = _mark_word;
_obj = temp_obj;
- _mark_oop = temp_mark_oop;
+ _mark_word = temp_mark_word;
}
CLDClaimContext::CLDClaimContext() : _cld(NULL) {}
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -78,20 +78,20 @@
* The destructor will restore the original mark oop.
*/
-class MarkOopContext {
+class MarkWordContext {
private:
oop _obj;
- markOop _mark_oop;
- void swap(MarkOopContext& rhs);
+ markWord _mark_word;
+ void swap(MarkWordContext& rhs);
public:
- MarkOopContext();
- MarkOopContext(const oop obj);
- MarkOopContext(const MarkOopContext& rhs);
- void operator=(MarkOopContext rhs);
- ~MarkOopContext();
+ MarkWordContext();
+ MarkWordContext(const oop obj);
+ MarkWordContext(const MarkWordContext& rhs);
+ void operator=(MarkWordContext rhs);
+ ~MarkWordContext();
};
-typedef SaveRestore<oop, ContextStore<oop, MarkOopContext> > SaveRestoreMarkOops;
+typedef SaveRestore<oop, ContextStore<oop, MarkWordContext> > SaveRestoreMarkWords;
class ClassLoaderData;
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -385,8 +385,8 @@
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
- markOop mark = obj->mark();
- TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(mark), p2i(lock));
+ markWord mark = obj->mark();
+ TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, mark.value(), p2i(lock));
tty->flush();
}
if (PrintBiasedLockingStatistics) {
@@ -435,7 +435,7 @@
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
- TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(obj->mark()), p2i(lock));
+ TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, obj->mark().value(), p2i(lock));
tty->flush();
}
JRT_END
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -103,7 +103,7 @@
nonstatic_field(Array<Klass*>, _length, int) \
nonstatic_field(Array<Klass*>, _data[0], Klass*) \
\
- volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
+ volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \
\
static_field(CodeCache, _low_bound, address) \
static_field(CodeCache, _high_bound, address) \
@@ -194,7 +194,7 @@
nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
- nonstatic_field(Klass, _prototype_header, markOop) \
+ nonstatic_field(Klass, _prototype_header, markWord) \
nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \
@@ -257,7 +257,7 @@
volatile_nonstatic_field(ObjectMonitor, _EntryList, ObjectWaiter*) \
volatile_nonstatic_field(ObjectMonitor, _succ, Thread*) \
\
- volatile_nonstatic_field(oopDesc, _mark, markOop) \
+ volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
\
static_field(os, _polling_page, address) \
@@ -301,6 +301,8 @@
static_field(StubRoutines, _aescrypt_decryptBlock, address) \
static_field(StubRoutines, _cipherBlockChaining_encryptAESCrypt, address) \
static_field(StubRoutines, _cipherBlockChaining_decryptAESCrypt, address) \
+ static_field(StubRoutines, _electronicCodeBook_encryptAESCrypt, address) \
+ static_field(StubRoutines, _electronicCodeBook_decryptAESCrypt, address) \
static_field(StubRoutines, _counterMode_AESCrypt, address) \
static_field(StubRoutines, _base64_encodeBlock, address) \
static_field(StubRoutines, _ghash_processBlocks, address) \
@@ -563,7 +565,7 @@
declare_constant(Klass::_lh_array_tag_type_value) \
declare_constant(Klass::_lh_array_tag_obj_value) \
\
- declare_constant(markOopDesc::no_hash) \
+ declare_constant(markWord::no_hash) \
\
declare_constant(Method::_caller_sensitive) \
declare_constant(Method::_force_inline) \
@@ -595,19 +597,19 @@
declare_constant(InvocationCounter::count_increment) \
declare_constant(InvocationCounter::count_shift) \
\
- declare_constant(markOopDesc::hash_shift) \
+ declare_constant(markWord::hash_shift) \
\
- declare_constant(markOopDesc::biased_lock_mask_in_place) \
- declare_constant(markOopDesc::age_mask_in_place) \
- declare_constant(markOopDesc::epoch_mask_in_place) \
- declare_constant(markOopDesc::hash_mask) \
- declare_constant(markOopDesc::hash_mask_in_place) \
+ declare_constant(markWord::biased_lock_mask_in_place) \
+ declare_constant(markWord::age_mask_in_place) \
+ declare_constant(markWord::epoch_mask_in_place) \
+ declare_constant(markWord::hash_mask) \
+ declare_constant(markWord::hash_mask_in_place) \
\
- declare_constant(markOopDesc::unlocked_value) \
- declare_constant(markOopDesc::biased_lock_pattern) \
+ declare_constant(markWord::unlocked_value) \
+ declare_constant(markWord::biased_lock_pattern) \
\
- declare_constant(markOopDesc::no_hash_in_place) \
- declare_constant(markOopDesc::no_lock_in_place) \
+ declare_constant(markWord::no_hash_in_place) \
+ declare_constant(markWord::no_lock_in_place) \
#define VM_ADDRESSES(declare_address, declare_preprocessor_address, declare_function) \
declare_function(SharedRuntime::register_finalizer) \
--- a/src/hotspot/share/memory/heapShared.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/memory/heapShared.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -98,7 +98,7 @@
}
unsigned HeapShared::oop_hash(oop const& p) {
- assert(!p->mark()->has_bias_pattern(),
+ assert(!p->mark().has_bias_pattern(),
"this object should never have been locked"); // so identity_hash won't safepoin
unsigned hash = (unsigned)p->identity_hash();
return hash;
--- a/src/hotspot/share/memory/universe.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/memory/universe.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1185,12 +1185,12 @@
}
uintptr_t Universe::verify_mark_mask() {
- return markOopDesc::lock_mask_in_place;
+ return markWord::lock_mask_in_place;
}
uintptr_t Universe::verify_mark_bits() {
intptr_t mask = verify_mark_mask();
- intptr_t bits = (intptr_t)markOopDesc::prototype();
+ intptr_t bits = (intptr_t)markWord::prototype().value();
assert((bits & ~mask) == 0, "no stray header bits");
return bits;
}
--- a/src/hotspot/share/memory/virtualspace.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/memory/virtualspace.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -623,9 +623,9 @@
initialize(size, alignment, large, NULL, false);
}
- assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+ assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
- assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+ assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
if (base() != NULL) {
--- a/src/hotspot/share/oops/arrayOop.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/arrayOop.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -34,7 +34,7 @@
// The layout of array Oops is:
//
-// markOop
+// markWord
// Klass* // 32 bits if compressed but declared 64 in LP64.
// length // shares klass memory or allocated after declared fields.
--- a/src/hotspot/share/oops/klass.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/klass.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -195,7 +195,7 @@
// should be NULL before setting it.
Klass::Klass(KlassID id) : _id(id),
_java_mirror(NULL),
- _prototype_header(markOopDesc::prototype()),
+ _prototype_header(markWord::prototype()),
_shared_class_path_index(-1) {
CDS_ONLY(_shared_class_flags = 0;)
CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;)
@@ -744,9 +744,9 @@
if (WizardMode) {
// print header
- obj->mark()->print_on(st);
+ obj->mark().print_on(st);
st->cr();
- st->print(BULLET"prototype_header: " INTPTR_FORMAT, p2i(_prototype_header));
+ st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value());
st->cr();
}
--- a/src/hotspot/share/oops/klass.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/klass.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -28,6 +28,7 @@
#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
+#include "oops/markOop.hpp"
#include "oops/metadata.hpp"
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
@@ -159,7 +160,7 @@
// Biased locking implementation and statistics
// (the 64-bit chunk goes first, to avoid some fragmentation)
jlong _last_biased_lock_bulk_revocation_time;
- markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
+ markWord _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
// vtable length
@@ -619,9 +620,9 @@
// Biased locking support
// Note: the prototype header is always set up to be at least the
- // prototype markOop. If biased locking is enabled it may further be
+ // prototype markWord. If biased locking is enabled it may further be
// biasable and have an epoch.
- markOop prototype_header() const { return _prototype_header; }
+ markWord prototype_header() const { return _prototype_header; }
// NOTE: once instances of this klass are floating around in the
// system, this header must only be updated at a safepoint.
// NOTE 2: currently we only ever set the prototype header to the
@@ -630,7 +631,7 @@
// wanting to reduce the initial scope of this optimization. There
// are potential problems in setting the bias pattern for
// JVM-internal oops.
- inline void set_prototype_header(markOop header);
+ inline void set_prototype_header(markWord header);
static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); }
int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }
--- a/src/hotspot/share/oops/klass.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/klass.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -29,8 +29,8 @@
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
-inline void Klass::set_prototype_header(markOop header) {
- assert(!header->has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
+inline void Klass::set_prototype_header(markWord header) {
+ assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
_prototype_header = header;
}
--- a/src/hotspot/share/oops/markOop.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/markOop.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -27,7 +27,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/objectMonitor.hpp"
-void markOopDesc::print_on(outputStream* st) const {
+void markWord::print_on(outputStream* st) const {
if (is_marked()) { // last bits = 11
st->print(" marked(" INTPTR_FORMAT ")", value());
} else if (has_monitor()) { // last bits = 10
--- a/src/hotspot/share/oops/markOop.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/markOop.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -25,12 +25,11 @@
#ifndef SHARE_OOPS_MARKOOP_HPP
#define SHARE_OOPS_MARKOOP_HPP
-#include "oops/oop.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "oops/oopsHierarchy.hpp"
-// The markOop describes the header of an object.
-//
-// Note that the mark is not a real oop but just a word.
-// It is placed in the oop hierarchy for historical reasons.
+// The markWord describes the header of an object.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
@@ -101,12 +100,35 @@
class ObjectMonitor;
class JavaThread;
-class markOopDesc: public oopDesc {
+class markWord {
private:
- // Conversion
- uintptr_t value() const { return (uintptr_t) this; }
+ uintptr_t _value;
public:
+ explicit markWord(uintptr_t value) : _value(value) {}
+
+ markWord() { /* uninitialized */}
+
+ // It is critical for performance that this class be trivially
+ // destructible, copyable, and assignable.
+
+ static markWord from_pointer(void* ptr) {
+ return markWord((uintptr_t)ptr);
+ }
+ void* to_pointer() const {
+ return (void*)_value;
+ }
+
+ bool operator==(const markWord& other) const {
+ return _value == other._value;
+ }
+ bool operator!=(const markWord& other) const {
+ return !operator==(other);
+ }
+
+ // Conversion
+ uintptr_t value() const { return _value; }
+
// Constants
enum { age_bits = 4,
lock_bits = 2,
@@ -164,6 +186,9 @@
enum { max_bias_epoch = epoch_mask };
+ // Creates a markWord with all bits set to zero.
+ static markWord zero() { return markWord(uintptr_t(0)); }
+
// Biased Locking accessors.
// These must be checked by all code which calls into the
// ObjectSynchronizer and other code. The biasing is not understood
@@ -189,17 +214,17 @@
assert(has_bias_pattern(), "should not call this otherwise");
return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
}
- markOop set_bias_epoch(int epoch) {
+ markWord set_bias_epoch(int epoch) {
assert(has_bias_pattern(), "should not call this otherwise");
assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
- return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
+ return markWord(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
}
- markOop incr_bias_epoch() {
+ markWord incr_bias_epoch() {
return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
}
// Prototype mark for initialization
- static markOop biased_locking_prototype() {
- return markOop( biased_lock_pattern );
+ static markWord biased_locking_prototype() {
+ return markWord( biased_lock_pattern );
}
// lock accessors (note that these assume lock_shift == 0)
@@ -214,7 +239,7 @@
}
bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
- // Special temporary state of the markOop while being inflated.
+ // Special temporary state of the markWord while being inflated.
// Code that looks at the mark outside a lock needs to take this into account.
bool is_being_inflated() const { return (value() == 0); }
@@ -224,7 +249,7 @@
// check for and avoid overwriting a 0 value installed by some
// other thread. (They should spin or block instead. The 0 value
// is transient and *should* be short-lived).
- static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress
+ static markWord INFLATING() { return zero(); } // inflate-in-progress
// Should this header be preserved during GC?
inline bool must_be_preserved(oop obj_containing_mark) const;
@@ -259,9 +284,9 @@
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
- // They must get updated if markOop layout get changed.
- markOop set_unlocked() const {
- return markOop(value() | unlocked_value);
+ // They must get updated if the markWord layout gets changed.
+ markWord set_unlocked() const {
+ return markWord(value() | unlocked_value);
}
bool has_locker() const {
return ((value() & lock_mask_in_place) == locked_value);
@@ -281,56 +306,56 @@
bool has_displaced_mark_helper() const {
return ((value() & unlocked_value) == 0);
}
- markOop displaced_mark_helper() const {
+ markWord displaced_mark_helper() const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
- return *(markOop*)ptr;
+ return *(markWord*)ptr;
}
- void set_displaced_mark_helper(markOop m) const {
+ void set_displaced_mark_helper(markWord m) const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
- *(markOop*)ptr = m;
+ ((markWord*)ptr)->_value = m._value;
}
- markOop copy_set_hash(intptr_t hash) const {
+ markWord copy_set_hash(intptr_t hash) const {
intptr_t tmp = value() & (~hash_mask_in_place);
tmp |= ((hash & hash_mask) << hash_shift);
- return (markOop)tmp;
+ return markWord(tmp);
}
// it is only used to be stored into BasicLock as the
// indicator that the lock is using heavyweight monitor
- static markOop unused_mark() {
- return (markOop) marked_value;
+ static markWord unused_mark() {
+ return markWord(marked_value);
}
- // the following two functions create the markOop to be
+ // the following two functions create the markWord to be
// stored into object header, it encodes monitor info
- static markOop encode(BasicLock* lock) {
- return (markOop) lock;
+ static markWord encode(BasicLock* lock) {
+ return from_pointer(lock);
}
- static markOop encode(ObjectMonitor* monitor) {
+ static markWord encode(ObjectMonitor* monitor) {
intptr_t tmp = (intptr_t) monitor;
- return (markOop) (tmp | monitor_value);
+ return markWord(tmp | monitor_value);
}
- static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
+ static markWord encode(JavaThread* thread, uint age, int bias_epoch) {
intptr_t tmp = (intptr_t) thread;
assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
assert(age <= max_age, "age too large");
assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
- return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
+ return markWord(tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
}
// used to encode pointers during GC
- markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
+ markWord clear_lock_bits() { return markWord(value() & ~lock_mask_in_place); }
// age operations
- markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
- markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
+ markWord set_marked() { return markWord((value() & ~lock_mask_in_place) | marked_value); }
+ markWord set_unmarked() { return markWord((value() & ~lock_mask_in_place) | unlocked_value); }
- uint age() const { return mask_bits(value() >> age_shift, age_mask); }
- markOop set_age(uint v) const {
+ uint age() const { return mask_bits(value() >> age_shift, age_mask); }
+ markWord set_age(uint v) const {
assert((v & ~age_mask) == 0, "shouldn't overflow age field");
- return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
+ return markWord((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
}
- markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
+ markWord incr_age() const { return age() == max_age ? markWord(_value) : set_age(age() + 1); }
// hash operations
intptr_t hash() const {
@@ -342,24 +367,24 @@
}
// Prototype mark for initialization
- static markOop prototype() {
- return markOop( no_hash_in_place | no_lock_in_place );
+ static markWord prototype() {
+ return markWord( no_hash_in_place | no_lock_in_place );
}
// Helper function for restoration of unmarked mark oops during GC
- static inline markOop prototype_for_object(oop obj);
+ static inline markWord prototype_for_object(oop obj);
// Debugging
void print_on(outputStream* st) const;
// Prepare address of oop for placement into mark
- inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
+ inline static markWord encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }
// Recover address of oop from encoded form used in mark
- inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+ inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
- // These markOops indicate cms free chunk blocks and not objects.
- // In 64 bit, the markOop is set to distinguish them from oops.
+ // These markWords indicate cms free chunk blocks and not objects.
+ // In 64 bit, the markWord is set to distinguish them from oops.
// These are defined in 32 bit mode for vmStructs.
const static uintptr_t cms_free_chunk_pattern = 0x1;
@@ -374,9 +399,9 @@
(address_word)size_mask << size_shift;
#ifdef _LP64
- static markOop cms_free_prototype() {
- return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
- ((cms_free_chunk_pattern & cms_mask) << cms_shift));
+ static markWord cms_free_prototype() {
+ return markWord(((intptr_t)prototype().value() & ~cms_mask_in_place) |
+ ((cms_free_chunk_pattern & cms_mask) << cms_shift));
}
uintptr_t cms_encoding() const {
return mask_bits(value() >> cms_shift, cms_mask);
@@ -387,12 +412,22 @@
}
size_t get_size() const { return (size_t)(value() >> size_shift); }
- static markOop set_size_and_free(size_t size) {
+ static markWord set_size_and_free(size_t size) {
assert((size & ~size_mask) == 0, "shouldn't overflow size field");
- return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
- (((intptr_t)size & size_mask) << size_shift));
+ return markWord(((intptr_t)cms_free_prototype().value() & ~size_mask_in_place) |
+ (((intptr_t)size & size_mask) << size_shift));
}
#endif // _LP64
};
+// Support atomic operations.
+template<>
+struct PrimitiveConversions::Translate<markWord> : public TrueType {
+ typedef markWord Value;
+ typedef uintptr_t Decayed;
+
+ static Decayed decay(const Value& x) { return x.value(); }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
#endif // SHARE_OOPS_MARKOOP_HPP
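
A note on the markWord rewrite above: what used to be a pointer type (markOop, i.e. markOopDesc*) becomes a trivially copyable value type wrapping a uintptr_t, and the PrimitiveConversions::Translate specialization lets the Atomic layer decay it to that integer and recover a wrapper from the raw result. A minimal standalone sketch of that pattern follows; it is not HotSpot code and all names in it are illustrative only.

#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative stand-in for markWord: a trivially copyable word wrapper.
class Word {
  uintptr_t _value;
 public:
  explicit Word(uintptr_t v) : _value(v) {}
  uintptr_t value() const { return _value; }
  bool operator==(const Word& o) const { return _value == o._value; }
};

// Illustrative stand-in for the Translate idea: decay the wrapper to its
// underlying integer for the atomic op, then recover a wrapper afterwards.
Word cas(std::atomic<uintptr_t>& cell, Word new_w, Word old_w) {
  uintptr_t expected = old_w.value();               // decay
  cell.compare_exchange_strong(expected, new_w.value());
  return Word(expected);                            // recover (old value on success)
}

int main() {
  std::atomic<uintptr_t> mark{0x5};
  Word old_mark(0x5), new_mark(0x7);
  Word witness = cas(mark, new_mark, old_mark);
  assert(witness == old_mark && mark.load() == 0x7);
  return 0;
}
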
--- a/src/hotspot/share/oops/markOop.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/markOop.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -31,15 +31,15 @@
#include "runtime/globals.hpp"
// Should this header be preserved during GC (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
if (has_bias_pattern()) {
// Will reset bias at end of collection
// Mark words of biased and currently locked objects are preserved separately
return false;
}
- markOop prototype_header = prototype_for_object(obj_containing_mark);
- if (prototype_header->has_bias_pattern()) {
+ markWord prototype_header = prototype_for_object(obj_containing_mark);
+ if (prototype_header.has_bias_pattern()) {
// Individual instance which has its bias revoked; must return
// true for correctness
return true;
@@ -48,7 +48,7 @@
}
// Should this header be preserved during GC?
-inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias(obj_containing_mark);
@@ -56,7 +56,7 @@
// Should this header be preserved in the case of a promotion failure
// during scavenge (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// We don't explicitly save off the mark words of biased and
// currently-locked objects during scavenges, so if during a
@@ -68,7 +68,7 @@
// BiasedLocking::preserve_marks() / restore_marks() in the middle
// of a scavenge when a promotion failure has first been detected.
if (has_bias_pattern() ||
- prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
+ prototype_for_object(obj_containing_mark).has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@@ -76,7 +76,7 @@
// Should this header be preserved in the case of a promotion failure
// during scavenge?
-inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
@@ -85,11 +85,11 @@
// Same as must_be_preserved_with_bias_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// CMS scavenges preserve mark words in similar fashion to promotion failures; see above
if (has_bias_pattern() ||
- klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
+ klass_of_obj_containing_mark->prototype_header().has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@@ -97,16 +97,16 @@
// Same as must_be_preserved_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markWord::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
}
-inline markOop markOopDesc::prototype_for_object(oop obj) {
+inline markWord markWord::prototype_for_object(oop obj) {
#ifdef ASSERT
- markOop prototype_header = obj->klass()->prototype_header();
- assert(prototype_header == prototype() || prototype_header->has_bias_pattern(), "corrupt prototype header");
+ markWord prototype_header = obj->klass()->prototype_header();
+ assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
#endif
return obj->klass()->prototype_header();
}
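
The predicates above all reduce, when biased locking is off, to the rule "return (!is_unlocked() || !has_no_hash());": save the header only if it is locked or carries an identity hash. A standalone sketch of that fallback rule with assumed bit positions (not HotSpot code):

#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask      = 0x3;        // assumed low two lock bits
constexpr uintptr_t unlocked_value = 0x1;        // assumed "unlocked" pattern
constexpr int       hash_shift     = 8;          // assumed hash field position
constexpr uintptr_t hash_mask      = 0x7fffffff; // assumed hash field width

bool is_unlocked(uintptr_t m) { return (m & lock_mask) == unlocked_value; }
bool has_no_hash(uintptr_t m) { return ((m >> hash_shift) & hash_mask) == 0; }

// With biased locking off: preserve the header only if it is locked or hashed.
bool must_be_preserved(uintptr_t m) { return !is_unlocked(m) || !has_no_hash(m); }

int main() {
  assert(!must_be_preserved(unlocked_value));                          // neutral header
  assert( must_be_preserved(unlocked_value | (0x2aUL << hash_shift))); // hashed header
  assert( must_be_preserved(0x0));                                     // locked header
  return 0;
}
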
--- a/src/hotspot/share/oops/method.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/method.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2067,7 +2067,7 @@
#endif // PRODUCT
};
-// Something that can't be mistaken for an address or a markOop
+// Something that can't be mistaken for an address or a markWord
Method* const JNIMethodBlock::_free_method = (Method*)55;
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(NULL) {
--- a/src/hotspot/share/oops/objArrayKlass.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/objArrayKlass.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -328,7 +328,7 @@
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
- if (or_null) return NULL;
+ if (or_null) return NULL;
ResourceMark rm;
JavaThread *jt = (JavaThread *)THREAD;
@@ -349,14 +349,13 @@
assert(ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass");
}
}
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
}
ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
if (or_null) {
return ak->array_klass_or_null(n);
}
+ THREAD->check_possible_safepoint();
return ak->array_klass(n, THREAD);
}
--- a/src/hotspot/share/oops/objArrayOop.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/objArrayOop.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -37,7 +37,7 @@
friend class ObjArrayKlass;
friend class Runtime1;
friend class psPromotionManager;
- friend class CSetMarkOopClosure;
+ friend class CSetMarkWordClosure;
friend class G1ParScanPartialArrayClosure;
template <class T> T* obj_at_addr(int index) const;
--- a/src/hotspot/share/oops/oop.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/oop.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -105,14 +105,14 @@
return false;
}
- // Header verification: the mark is typically non-NULL. If we're
- // at a safepoint, it must not be null.
+ // Header verification: the mark is typically non-zero. If we're
+ // at a safepoint, it must not be zero.
// Outside of a safepoint, the header could be changing (for example,
// another thread could be inflating a lock on this object).
if (ignore_mark_word) {
return true;
}
- if (obj->mark_raw() != NULL) {
+ if (obj->mark_raw().value() != 0) {
return true;
}
return !SafepointSynchronize::is_at_safepoint();
--- a/src/hotspot/share/oops/oop.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/oop.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -28,6 +28,7 @@
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
+#include "oops/markOop.hpp"
#include "oops/metadata.hpp"
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
@@ -55,24 +56,24 @@
friend class VMStructs;
friend class JVMCIVMStructs;
private:
- volatile markOop _mark;
+ volatile markWord _mark;
union _metadata {
Klass* _klass;
narrowKlass _compressed_klass;
} _metadata;
public:
- inline markOop mark() const;
- inline markOop mark_raw() const;
- inline markOop* mark_addr_raw() const;
+ inline markWord mark() const;
+ inline markWord mark_raw() const;
+ inline markWord* mark_addr_raw() const;
- inline void set_mark(volatile markOop m);
- inline void set_mark_raw(volatile markOop m);
- static inline void set_mark_raw(HeapWord* mem, markOop m);
+ inline void set_mark(volatile markWord m);
+ inline void set_mark_raw(volatile markWord m);
+ static inline void set_mark_raw(HeapWord* mem, markWord m);
- inline void release_set_mark(markOop m);
- inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
- inline markOop cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order = memory_order_conservative);
+ inline void release_set_mark(markWord m);
+ inline markWord cas_set_mark(markWord new_mark, markWord old_mark);
+ inline markWord cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order = memory_order_conservative);
// Used only to re-initialize the mark word (e.g., of promoted
// objects during a GC) -- requires a valid klass pointer
@@ -266,13 +267,13 @@
inline bool is_forwarded() const;
inline void forward_to(oop p);
- inline bool cas_forward_to(oop p, markOop compare, atomic_memory_order order = memory_order_conservative);
+ inline bool cas_forward_to(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
// Like "forward_to", but inserts the forwarding pointer atomically.
// Exactly one thread succeeds in inserting the forwarding pointer, and
// this call returns "NULL" for that thread; any other thread has the
// value of the forwarding pointer returned and does not modify "this".
- inline oop forward_to_atomic(oop p, markOop compare, atomic_memory_order order = memory_order_conservative);
+ inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
inline oop forwardee() const;
inline oop forwardee_acquire() const;
@@ -308,9 +309,9 @@
intptr_t slow_identity_hash();
// marks are forwarded to stack when object is locked
- inline bool has_displaced_mark_raw() const;
- inline markOop displaced_mark_raw() const;
- inline void set_displaced_mark_raw(markOop m);
+ inline bool has_displaced_mark_raw() const;
+ inline markWord displaced_mark_raw() const;
+ inline void set_displaced_mark_raw(markWord m);
static bool has_klass_gap();
--- a/src/hotspot/share/oops/oop.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/oop.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -43,48 +43,50 @@
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
-markOop oopDesc::mark() const {
- return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+markWord oopDesc::mark() const {
+ uintptr_t v = HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+ return markWord(v);
}
-markOop oopDesc::mark_raw() const {
- return _mark;
+markWord oopDesc::mark_raw() const {
+ return Atomic::load(&_mark);
}
-markOop* oopDesc::mark_addr_raw() const {
- return (markOop*) &_mark;
+markWord* oopDesc::mark_addr_raw() const {
+ return (markWord*) &_mark;
}
-void oopDesc::set_mark(volatile markOop m) {
- HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+void oopDesc::set_mark(markWord m) {
+ HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}
-void oopDesc::set_mark_raw(volatile markOop m) {
- _mark = m;
+void oopDesc::set_mark_raw(markWord m) {
+ Atomic::store(m, &_mark);
}
-void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
- *(markOop*)(((char*)mem) + mark_offset_in_bytes()) = m;
+void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
+ *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}
-void oopDesc::release_set_mark(markOop m) {
- HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+void oopDesc::release_set_mark(markWord m) {
+ HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}
-markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
- return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
+markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
+ uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
+ return markWord(v);
}
-markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
+markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
}
void oopDesc::init_mark() {
- set_mark(markOopDesc::prototype_for_object(this));
+ set_mark(markWord::prototype_for_object(this));
}
void oopDesc::init_mark_raw() {
- set_mark_raw(markOopDesc::prototype_for_object(this));
+ set_mark_raw(markWord::prototype_for_object(this));
}
Klass* oopDesc::klass() const {
@@ -319,31 +321,31 @@
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
bool oopDesc::is_locked() const {
- return mark()->is_locked();
+ return mark().is_locked();
}
bool oopDesc::is_unlocked() const {
- return mark()->is_unlocked();
+ return mark().is_unlocked();
}
bool oopDesc::has_bias_pattern() const {
- return mark()->has_bias_pattern();
+ return mark().has_bias_pattern();
}
bool oopDesc::has_bias_pattern_raw() const {
- return mark_raw()->has_bias_pattern();
+ return mark_raw().has_bias_pattern();
}
// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
- return mark_raw()->is_marked();
+ return mark_raw().is_marked();
}
// Used by scavengers
bool oopDesc::is_forwarded() const {
// The extra heap check is needed since the obj might be locked, in which case the
// mark would point to a stack location and have the sentinel bit cleared
- return mark_raw()->is_marked();
+ return mark_raw().is_marked();
}
// Used by scavengers
@@ -355,36 +357,36 @@
assert(!is_archived_object(oop(this)) &&
!is_archived_object(p),
"forwarding archive object");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversable");
set_mark_raw(m);
}
// Used by parallel scavengers
-bool oopDesc::cas_forward_to(oop p, markOop compare, atomic_memory_order order) {
+bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
assert(check_obj_alignment(p),
"forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversable");
return cas_set_mark_raw(m, compare, order) == compare;
}
-oop oopDesc::forward_to_atomic(oop p, markOop compare, atomic_memory_order order) {
+oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
// CMS forwards some non-heap value into the mark oop to reserve oops during
// promotion, so the next two asserts do not hold.
assert(UseConcMarkSweepGC || check_obj_alignment(p),
"forwarding to something not aligned");
assert(UseConcMarkSweepGC || Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
- markOop old_mark = cas_set_mark_raw(m, compare, order);
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversable");
+ markWord old_mark = cas_set_mark_raw(m, compare, order);
if (old_mark == compare) {
return NULL;
} else {
- return (oop)old_mark->decode_pointer();
+ return (oop)old_mark.decode_pointer();
}
}
@@ -392,33 +394,32 @@
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
- return (oop) mark_raw()->decode_pointer();
+ return (oop) mark_raw().decode_pointer();
}
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
- markOop m = OrderAccess::load_acquire(&_mark);
- return (oop) m->decode_pointer();
+ return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
}
// The following method needs to be MT safe.
uint oopDesc::age() const {
assert(!is_forwarded(), "Attempt to read age from forwarded mark");
if (has_displaced_mark_raw()) {
- return displaced_mark_raw()->age();
+ return displaced_mark_raw().age();
} else {
- return mark_raw()->age();
+ return mark_raw().age();
}
}
void oopDesc::incr_age() {
assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
if (has_displaced_mark_raw()) {
- set_displaced_mark_raw(displaced_mark_raw()->incr_age());
+ set_displaced_mark_raw(displaced_mark_raw().incr_age());
} else {
- set_mark_raw(mark_raw()->incr_age());
+ set_mark_raw(mark_raw().incr_age());
}
}
@@ -460,26 +461,26 @@
intptr_t oopDesc::identity_hash() {
// Fast case; if the object is unlocked and the hash value is set, no locking is needed
// Note: The mark must be read into local variable to avoid concurrent updates.
- markOop mrk = mark();
- if (mrk->is_unlocked() && !mrk->has_no_hash()) {
- return mrk->hash();
- } else if (mrk->is_marked()) {
- return mrk->hash();
+ markWord mrk = mark();
+ if (mrk.is_unlocked() && !mrk.has_no_hash()) {
+ return mrk.hash();
+ } else if (mrk.is_marked()) {
+ return mrk.hash();
} else {
return slow_identity_hash();
}
}
bool oopDesc::has_displaced_mark_raw() const {
- return mark_raw()->has_displaced_mark_helper();
+ return mark_raw().has_displaced_mark_helper();
}
-markOop oopDesc::displaced_mark_raw() const {
- return mark_raw()->displaced_mark_helper();
+markWord oopDesc::displaced_mark_raw() const {
+ return mark_raw().displaced_mark_helper();
}
-void oopDesc::set_displaced_mark_raw(markOop m) {
- mark_raw()->set_displaced_mark_helper(m);
+void oopDesc::set_displaced_mark_raw(markWord m) {
+ mark_raw().set_displaced_mark_helper(m);
}
#endif // SHARE_OOPS_OOP_INLINE_HPP
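
The forwarding changes above keep the long-standing trick of storing a forwarding pointer in the header with the low "marked" bits set, only spelled through markWord now. A minimal standalone sketch of that encode/decode round trip, assuming a 2-bit lock field and marked pattern 0b11 (not HotSpot code):

#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask    = 0x3;  // assumed low two lock bits
constexpr uintptr_t marked_value = 0x3;  // assumed "marked/forwarded" pattern

// Encode an aligned pointer as a header word with the marked pattern set.
uintptr_t encode_pointer_as_mark(void* p) {
  return (uintptr_t)p | marked_value;
}

// Recover the pointer by clearing the lock bits again.
void* decode_pointer(uintptr_t mark) {
  return (void*)(mark & ~lock_mask);
}

int main() {
  alignas(8) static int target;
  uintptr_t m = encode_pointer_as_mark(&target);
  assert(decode_pointer(m) == &target);   // encoding must be reversible
  return 0;
}
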
--- a/src/hotspot/share/oops/oopsHierarchy.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -39,15 +39,14 @@
typedef juint narrowKlass;
typedef void* OopOrNarrowOopStar;
-typedef class markOopDesc* markOop;
#ifndef CHECK_UNHANDLED_OOPS
-typedef class oopDesc* oop;
+typedef class oopDesc* oop;
typedef class instanceOopDesc* instanceOop;
-typedef class arrayOopDesc* arrayOop;
+typedef class arrayOopDesc* arrayOop;
typedef class objArrayOopDesc* objArrayOop;
-typedef class typeArrayOopDesc* typeArrayOop;
+typedef class typeArrayOopDesc* typeArrayOop;
#else
@@ -82,7 +81,6 @@
void register_oop();
void unregister_oop();
- // friend class markOop;
public:
void set_obj(const void* p) {
raw_set_obj(p);
@@ -121,7 +119,6 @@
operator oopDesc* () const volatile { return obj(); }
operator intptr_t* () const { return (intptr_t*)obj(); }
operator PromotedObject* () const { return (PromotedObject*)obj(); }
- operator markOop () const volatile { return markOop(obj()); }
operator address () const { return (address)obj(); }
// from javaCalls.cpp
--- a/src/hotspot/share/oops/typeArrayKlass.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/oops/typeArrayKlass.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -198,13 +198,13 @@
assert(h_ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass");
}
}
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
}
+
ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension());
if (or_null) {
return h_ak->array_klass_or_null(n);
}
+ THREAD->check_possible_safepoint();
return h_ak->array_klass(n, THREAD);
}
--- a/src/hotspot/share/opto/c2compiler.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/c2compiler.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -602,6 +602,8 @@
case vmIntrinsics::_aescrypt_decryptBlock:
case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
case vmIntrinsics::_counterMode_AESCrypt:
case vmIntrinsics::_sha_implCompress:
case vmIntrinsics::_sha2_implCompress:
--- a/src/hotspot/share/opto/escape.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/escape.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -990,6 +990,8 @@
strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
+ strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
+ strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
--- a/src/hotspot/share/opto/library_call.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/library_call.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -293,8 +293,10 @@
bool inline_Class_cast();
bool inline_aescrypt_Block(vmIntrinsics::ID id);
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
+ bool inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id);
bool inline_counterMode_AESCrypt(vmIntrinsics::ID id);
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
+ Node* inline_electronicCodeBook_AESCrypt_predicate(bool decrypting);
Node* inline_counterMode_AESCrypt_predicate();
Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
@@ -807,6 +809,10 @@
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
return inline_cipherBlockChaining_AESCrypt(intrinsic_id());
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
+ return inline_electronicCodeBook_AESCrypt(intrinsic_id());
+
case vmIntrinsics::_counterMode_AESCrypt:
return inline_counterMode_AESCrypt(intrinsic_id());
@@ -912,6 +918,10 @@
return inline_cipherBlockChaining_AESCrypt_predicate(false);
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
return inline_cipherBlockChaining_AESCrypt_predicate(true);
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ return inline_electronicCodeBook_AESCrypt_predicate(false);
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
+ return inline_electronicCodeBook_AESCrypt_predicate(true);
case vmIntrinsics::_counterMode_AESCrypt:
return inline_counterMode_AESCrypt_predicate();
case vmIntrinsics::_digestBase_implCompressMB:
@@ -3955,9 +3965,9 @@
Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
// Test the header to see if it is unlocked.
- Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
+ Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
- Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
+ Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
@@ -3967,8 +3977,8 @@
// We depend on hash_mask being at most 32 bits and avoid the use of
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
- Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
- Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
+ Node *hash_mask = _gvn.intcon(markWord::hash_mask);
+ Node *hash_shift = _gvn.intcon(markWord::hash_shift);
Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
// This hack lets the hash bits live anywhere in the mark object now, as long
// as the shift drops the relevant bits into the low 32 bits. Note that
@@ -3977,7 +3987,7 @@
hshifted_header = ConvX2I(hshifted_header);
Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
- Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
+ Node *no_hash_val = _gvn.intcon(markWord::no_hash);
Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
@@ -6019,6 +6029,94 @@
return true;
}
+//------------------------------inline_electronicCodeBook_AESCrypt-----------------------
+bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
+ address stubAddr = NULL;
+ const char *stubName = NULL;
+
+ assert(UseAES, "need AES instruction support");
+
+ switch (id) {
+ case vmIntrinsics::_electronicCodeBook_encryptAESCrypt:
+ stubAddr = StubRoutines::electronicCodeBook_encryptAESCrypt();
+ stubName = "electronicCodeBook_encryptAESCrypt";
+ break;
+ case vmIntrinsics::_electronicCodeBook_decryptAESCrypt:
+ stubAddr = StubRoutines::electronicCodeBook_decryptAESCrypt();
+ stubName = "electronicCodeBook_decryptAESCrypt";
+ break;
+ default:
+ break;
+ }
+
+ if (stubAddr == NULL) return false;
+
+ Node* electronicCodeBook_object = argument(0);
+ Node* src = argument(1);
+ Node* src_offset = argument(2);
+ Node* len = argument(3);
+ Node* dest = argument(4);
+ Node* dest_offset = argument(5);
+
+ // (1) src and dest are arrays.
+ const Type* src_type = src->Value(&_gvn);
+ const Type* dest_type = dest->Value(&_gvn);
+ const TypeAryPtr* top_src = src_type->isa_aryptr();
+ const TypeAryPtr* top_dest = dest_type->isa_aryptr();
+ assert(top_src != NULL && top_src->klass() != NULL
+ && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
+
+ // checks are the responsibility of the caller
+ Node* src_start = src;
+ Node* dest_start = dest;
+ if (src_offset != NULL || dest_offset != NULL) {
+ assert(src_offset != NULL && dest_offset != NULL, "");
+ src_start = array_element_address(src, src_offset, T_BYTE);
+ dest_start = array_element_address(dest, dest_offset, T_BYTE);
+ }
+
+ // If we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
+ // (because of the predicated logic executed earlier),
+ // so we can cast it here safely.
+ // This requires a newer class file that has this array as littleEndian ints; otherwise we revert to the Java path.
+
+ Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
+ if (embeddedCipherObj == NULL) return false;
+
+ // cast it to what we know it will be at runtime
+ const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
+ assert(tinst != NULL, "ECB obj is null");
+ assert(tinst->klass()->is_loaded(), "ECB obj is not loaded");
+ ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
+ assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
+
+ ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
+ const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
+ const TypeOopPtr* xtype = aklass->as_instance_type();
+ Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
+ aescrypt_object = _gvn.transform(aescrypt_object);
+
+ // we need to get the start of the aescrypt_object's expanded key array
+ Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
+ if (k_start == NULL) return false;
+
+ Node* ecbCrypt;
+ if (Matcher::pass_original_key_for_aes()) {
+ // no SPARC version for AES/ECB intrinsics now.
+ return false;
+ }
+ // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
+ ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP,
+ OptoRuntime::electronicCodeBook_aescrypt_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ src_start, dest_start, k_start, len);
+
+ // return cipher length (int)
+ Node* retvalue = _gvn.transform(new ProjNode(ecbCrypt, TypeFunc::Parms));
+ set_result(retvalue);
+ return true;
+}
+
//------------------------------inline_counterMode_AESCrypt-----------------------
bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
assert(UseAES, "need AES instruction support");
@@ -6215,6 +6313,65 @@
return _gvn.transform(region);
}
+//----------------------------inline_electronicCodeBook_AESCrypt_predicate----------------------------
+// Return node representing slow path of predicate check.
+// the pseudo code we want to emulate with this predicate is:
+// for encryption:
+// if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
+// for decryption:
+// if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
+// note cipher==plain is more conservative than the original java code but that's OK
+//
+Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) {
+ // The receiver was checked for NULL already.
+ Node* objECB = argument(0);
+
+ // Load embeddedCipher field of ElectronicCodeBook object.
+ Node* embeddedCipherObj = load_field_from_object(objECB, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
+
+ // get AESCrypt klass for instanceOf check
+ // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
+ // will have same classloader as ElectronicCodeBook object
+ const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr();
+ assert(tinst != NULL, "ECBobj is null");
+ assert(tinst->klass()->is_loaded(), "ECBobj is not loaded");
+
+ // we want to do an instanceof comparison against the AESCrypt class
+ ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
+ if (!klass_AESCrypt->is_loaded()) {
+ // if AESCrypt is not even loaded, we never take the intrinsic fast path
+ Node* ctrl = control();
+ set_control(top()); // no regular fast path
+ return ctrl;
+ }
+ ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
+
+ Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
+ Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
+ Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
+
+ Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
+
+ // for encryption, we are done
+ if (!decrypting)
+ return instof_false; // even if it is NULL
+
+ // for decryption, we need to add a further check to avoid
+ // taking the intrinsic path when cipher and plain are the same
+ // see the original java code for why.
+ RegionNode* region = new RegionNode(3);
+ region->init_req(1, instof_false);
+ Node* src = argument(1);
+ Node* dest = argument(4);
+ Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
+ Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
+ Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
+ region->init_req(2, src_dest_conjoint);
+
+ record_for_igvn(region);
+ return _gvn.transform(region);
+}
+
//----------------------------inline_counterMode_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
--- a/src/hotspot/share/opto/macro.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/macro.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1638,7 +1638,7 @@
if (UseBiasedLocking && (length == NULL)) {
mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
} else {
- mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
+ mark_node = makecon(TypeRawPtr::make((address)markWord::prototype().value()));
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
@@ -2196,8 +2196,8 @@
// Get fast path - mark word has the biased lock pattern.
ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
- markOopDesc::biased_lock_mask_in_place,
- markOopDesc::biased_lock_pattern, true);
+ markWord::biased_lock_mask_in_place,
+ markWord::biased_lock_pattern, true);
// fast_lock_region->in(1) is set to slow path.
fast_lock_mem_phi->init_req(1, mem);
@@ -2226,7 +2226,7 @@
// Get slow path - mark word does NOT match the value.
Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
- (~markOopDesc::age_mask_in_place), 0);
+ (~markWord::age_mask_in_place), 0);
// region->in(3) is set to fast path - the object is biased to the current thread.
mem_phi->init_req(3, mem);
@@ -2237,7 +2237,7 @@
// First, check biased pattern.
// Get fast path - _prototype_header has the same biased lock pattern.
ctrl = opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
- markOopDesc::biased_lock_mask_in_place, 0, true);
+ markWord::biased_lock_mask_in_place, 0, true);
not_biased_ctrl = fast_lock_region->in(2); // Slow path
// fast_lock_region->in(2) - the prototype header is no longer biased
@@ -2259,7 +2259,7 @@
// Get slow path - mark word does NOT match epoch bits.
Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node,
- markOopDesc::epoch_mask_in_place, 0);
+ markWord::epoch_mask_in_place, 0);
// The epoch of the current bias is not valid, attempt to rebias the object
// toward the current thread.
rebiased_region->init_req(2, epoch_ctrl);
@@ -2269,9 +2269,9 @@
// rebiased_region->in(1) is set to fast path.
// The epoch of the current bias is still valid but we know
// nothing about the owner; it might be set or it might be clear.
- Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place |
- markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place);
+ Node* cmask = MakeConX(markWord::biased_lock_mask_in_place |
+ markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place);
Node* old = transform_later(new AndXNode(mark_node, cmask));
cast_thread = transform_later(new CastP2XNode(ctrl, thread));
Node* new_mark = transform_later(new OrXNode(cast_thread, old));
@@ -2386,8 +2386,8 @@
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
ctrl = opt_bits_test(ctrl, region, 3, mark_node,
- markOopDesc::biased_lock_mask_in_place,
- markOopDesc::biased_lock_pattern);
+ markWord::biased_lock_mask_in_place,
+ markWord::biased_lock_pattern);
} else {
region = new RegionNode(3);
// create a Phi for the memory state
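
The macro-expansion changes above keep emitting the same biased-locking fast-path test, now written with markWord constants: load the header, mask it, and compare against the biased pattern. A hedged standalone sketch of that bit test with assumed constants (not the Ideal-graph code):

#include <cassert>
#include <cstdint>

constexpr uintptr_t biased_lock_mask_in_place = 0x7;  // assumed low three bits
constexpr uintptr_t biased_lock_pattern       = 0x5;  // assumed "biased" pattern

// The fast path is taken when the masked header equals the biased pattern.
bool mark_has_bias_pattern(uintptr_t mark) {
  return (mark & biased_lock_mask_in_place) == biased_lock_pattern;
}

int main() {
  assert(mark_has_bias_pattern(0x1005));   // thread pointer | biased pattern
  assert(!mark_has_bias_pattern(0x1));     // neutral, unlocked header
  return 0;
}
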
--- a/src/hotspot/share/opto/runtime.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/runtime.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -900,6 +900,33 @@
return TypeFunc::make(domain, range);
}
+// for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int
+const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() {
+ // create input type (domain)
+ int num_args = 4;
+ if (Matcher::pass_original_key_for_aes()) {
+ num_args = 5;
+ }
+ int argcnt = num_args;
+ const Type** fields = TypeTuple::fields(argcnt);
+ int argp = TypeFunc::Parms;
+ fields[argp++] = TypePtr::NOTNULL; // src
+ fields[argp++] = TypePtr::NOTNULL; // dest
+ fields[argp++] = TypePtr::NOTNULL; // k array
+ fields[argp++] = TypeInt::INT; // src len
+ if (Matcher::pass_original_key_for_aes()) {
+ fields[argp++] = TypePtr::NOTNULL; // original k array
+ }
+ assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
+ const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
+
+ // returning cipher len (int)
+ fields = TypeTuple::fields(1);
+ fields[TypeFunc::Parms + 0] = TypeInt::INT;
+ const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
+ return TypeFunc::make(domain, range);
+}
+
//for counterMode calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
// create input type (domain)
--- a/src/hotspot/share/opto/runtime.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/opto/runtime.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -275,6 +275,7 @@
static const TypeFunc* aescrypt_block_Type();
static const TypeFunc* cipherBlockChaining_aescrypt_Type();
+ static const TypeFunc* electronicCodeBook_aescrypt_Type();
static const TypeFunc* counterMode_aescrypt_Type();
static const TypeFunc* sha_implCompress_Type();
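
The new electronicCodeBook_aescrypt_Type() above describes a leaf call taking three non-null pointers and an int length, and returning an int. For orientation only, here is a hedged sketch of the C-level shape such a stub is expected to match; the name, parameter types, and placeholder body are illustrative assumptions, not the actual stub (a real stub performs AES-ECB over 16-byte blocks).

#include <cstdint>
#include <cstring>

// Assumed shape of the leaf entry point: src and dest are byte-array element
// addresses, key is the expanded key schedule, len is the input length in
// bytes; the return value is the number of bytes processed.
extern "C" int electronicCodeBook_encrypt_sketch(const uint8_t* src, uint8_t* dest,
                                                 const uint32_t* /*key*/, int len) {
  std::memcpy(dest, src, (size_t)len);  // placeholder only, obviously not cryptographic
  return len;                           // cipher length, matching the TypeInt::INT return
}
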
--- a/src/hotspot/share/prims/jvm.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/jvm.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3384,32 +3384,33 @@
JVM_END
-// Raw monitor support //////////////////////////////////////////////////////////////////////
-
-// The lock routine below calls lock_without_safepoint_check in order to get a raw lock
-// without interfering with the safepoint mechanism. The routines are not JVM_LEAF because
-// they might be called by non-java threads. The JVM_LEAF installs a NoHandleMark check
-// that only works with java threads.
+// VM Raw monitor support //////////////////////////////////////////////////////////////////////
+
+// VM Raw monitors (not to be confused with JvmtiRawMonitors) provide a simple mutual
+// exclusion lock (not an actual monitor: no wait/notify) that is exported by the VM for use by JDK
+// library code. They may be used by JavaThreads and non-JavaThreads and do not participate
+// in the safepoint protocol, thread suspension, thread interruption, or anything of that
+// nature. JavaThreads will be "in native" when using this API from JDK code.
JNIEXPORT void* JNICALL JVM_RawMonitorCreate(void) {
VM_Exit::block_if_vm_exited();
JVMWrapper("JVM_RawMonitorCreate");
- return new Mutex(Mutex::native, "JVM_RawMonitorCreate");
+ return new os::PlatformMutex();
}
JNIEXPORT void JNICALL JVM_RawMonitorDestroy(void *mon) {
VM_Exit::block_if_vm_exited();
JVMWrapper("JVM_RawMonitorDestroy");
- delete ((Mutex*) mon);
+ delete ((os::PlatformMutex*) mon);
}
JNIEXPORT jint JNICALL JVM_RawMonitorEnter(void *mon) {
VM_Exit::block_if_vm_exited();
JVMWrapper("JVM_RawMonitorEnter");
- ((Mutex*) mon)->jvm_raw_lock();
+ ((os::PlatformMutex*) mon)->lock();
return 0;
}
@@ -3417,7 +3418,7 @@
JNIEXPORT void JNICALL JVM_RawMonitorExit(void *mon) {
VM_Exit::block_if_vm_exited();
JVMWrapper("JVM_RawMonitorExit");
- ((Mutex*) mon)->jvm_raw_unlock();
+ ((os::PlatformMutex*) mon)->unlock();
}
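
As rewritten above, the raw-monitor entry points are thin wrappers over os::PlatformMutex. A minimal sketch of how a caller would drive this exported API; the declarations mirror the signatures shown in the diff, and the usage pattern is an assumption for illustration, not taken from JDK library code.

// Declarations as exported by the VM (signatures taken from the diff above).
extern "C" {
  void* JVM_RawMonitorCreate(void);
  void  JVM_RawMonitorDestroy(void* mon);
  int   JVM_RawMonitorEnter(void* mon);
  void  JVM_RawMonitorExit(void* mon);
}

// Sketch of a caller: plain mutual exclusion, no safepoint interaction.
void guarded_section() {
  void* mon = JVM_RawMonitorCreate();
  JVM_RawMonitorEnter(mon);
  // ... critical section: no wait/notify is available on this lock ...
  JVM_RawMonitorExit(mon);
  JVM_RawMonitorDestroy(mon);
}
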
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -965,18 +965,18 @@
address owner = NULL;
{
- markOop mark = hobj()->mark();
+ markWord mark = hobj()->mark();
- if (!mark->has_monitor()) {
+ if (!mark.has_monitor()) {
// this object has a lightweight monitor
- if (mark->has_locker()) {
- owner = (address)mark->locker(); // save the address of the Lock word
+ if (mark.has_locker()) {
+ owner = (address)mark.locker(); // save the address of the Lock word
}
// implied else: no owner
} else {
// this object has a heavyweight monitor
- mon = mark->monitor();
+ mon = mark.monitor();
// The owner field of a heavyweight monitor may be NULL for no
// owner, a JavaThread * or it may still be the address of the
--- a/src/hotspot/share/prims/jvmtiExport.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiExport.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -2279,7 +2279,9 @@
address code_begin, address code_end)
{
// register the stub with the current dynamic code event collector
- JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
+ // Cannot take safepoint here so do not use state_for to get
+ // jvmti thread state.
+ JvmtiThreadState* state = JavaThread::current()->jvmti_thread_state();
// state can only be NULL if the current thread is exiting which
// should not happen since we're trying to post an event
guarantee(state != NULL, "attempt to register stub via an exiting thread");
@@ -2294,7 +2296,7 @@
if (thread != NULL && thread->is_Java_thread()) {
// Can not take safepoint here.
NoSafepointVerifier no_sfpt;
- // Can not take safepoint here so can not use state_for to get
+ // Cannot take safepoint here so do not use state_for to get
// jvmti thread state.
JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state();
if (state != NULL) {
@@ -2318,7 +2320,7 @@
if (thread != NULL && thread->is_Java_thread()) {
// Can not take safepoint here.
NoSafepointVerifier no_sfpt;
- // Can not take safepoint here so can not use state_for to get
+ // Cannot take safepoint here so do not use state_for to get
// jvmti thread state.
JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state();
if (state != NULL) {
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -520,7 +520,7 @@
tag_map = new JvmtiTagMap(env);
}
} else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ DEBUG_ONLY(Thread::current()->check_possible_safepoint());
}
return tag_map;
}
@@ -1628,8 +1628,8 @@
public:
void do_object(oop o) {
if (o != NULL) {
- markOop mark = o->mark();
- if (mark->is_marked()) {
+ markWord mark = o->mark();
+ if (mark.is_marked()) {
o->init_mark();
}
}
@@ -1641,7 +1641,7 @@
private:
// saved headers
static GrowableArray<oop>* _saved_oop_stack;
- static GrowableArray<markOop>* _saved_mark_stack;
+ static GrowableArray<markWord>* _saved_mark_stack;
static bool _needs_reset; // do we need to reset mark bits?
public:
@@ -1656,7 +1656,7 @@
};
GrowableArray<oop>* ObjectMarker::_saved_oop_stack = NULL;
-GrowableArray<markOop>* ObjectMarker::_saved_mark_stack = NULL;
+GrowableArray<markWord>* ObjectMarker::_saved_mark_stack = NULL;
bool ObjectMarker::_needs_reset = true; // need to reset mark bits by default
// initialize ObjectMarker - prepares for object marking
@@ -1667,7 +1667,7 @@
Universe::heap()->ensure_parsability(false); // no need to retire TLABs
// create stacks for interesting headers
- _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(4000, true);
+ _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(4000, true);
_saved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(4000, true);
if (UseBiasedLocking) {
@@ -1691,7 +1691,7 @@
// now restore the interesting headers
for (int i = 0; i < _saved_oop_stack->length(); i++) {
oop o = _saved_oop_stack->at(i);
- markOop mark = _saved_mark_stack->at(i);
+ markWord mark = _saved_mark_stack->at(i);
o->set_mark(mark);
}
@@ -1707,23 +1707,23 @@
// mark an object
inline void ObjectMarker::mark(oop o) {
assert(Universe::heap()->is_in(o), "sanity check");
- assert(!o->mark()->is_marked(), "should only mark an object once");
+ assert(!o->mark().is_marked(), "should only mark an object once");
// object's mark word
- markOop mark = o->mark();
-
- if (mark->must_be_preserved(o)) {
+ markWord mark = o->mark();
+
+ if (mark.must_be_preserved(o)) {
_saved_mark_stack->push(mark);
_saved_oop_stack->push(o);
}
// mark the object
- o->set_mark(markOopDesc::prototype()->set_marked());
+ o->set_mark(markWord::prototype().set_marked());
}
// return true if object is marked
inline bool ObjectMarker::visited(oop o) {
- return o->mark()->is_marked();
+ return o->mark().is_marked();
}
// Stack allocated class to help ensure that ObjectMarker is used
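The ObjectMarker logic above boils down to: save any header that carries state that cannot be recomputed, overwrite the header with a marked prototype, and restore the saved headers when the traversal finishes. A simplified standalone model of that save/mark/restore cycle, with the mark word reduced to a plain integer and "must be preserved" reduced to "non-default value" (all names are hypothetical):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct FakeObject { uint64_t header = 0; };           // 0 stands in for the neutral prototype
    constexpr uint64_t MARKED = 0x3;                       // stands in for a header with the marked bits set

    struct ObjectMarkerModel {
      std::vector<std::pair<FakeObject*, uint64_t>> saved; // plays the role of the saved oop/mark stacks

      void mark(FakeObject* o) {
        if (o->header != 0) saved.emplace_back(o, o->header); // header carries state that must be preserved
        o->header = MARKED;                                    // like markWord::prototype().set_marked()
      }
      bool visited(const FakeObject* o) const { return o->header == MARKED; }
      void done() {
        // Restore only the preserved headers; resetting the remaining marked
        // headers back to the neutral value (the heap-walk pass in the real
        // code) is omitted from this sketch.
        for (auto& p : saved) p.first->header = p.second;
        saved.clear();
      }
    };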
--- a/src/hotspot/share/prims/jvmtiThreadState.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/jvmtiThreadState.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -90,7 +90,9 @@
// check again with the lock held
state = state_for_while_locked(thread);
} else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ // Check possible safepoint even if state is non-null.
+ // (Note: the thread argument isn't the current thread)
+ DEBUG_ONLY(JavaThread::current()->check_possible_safepoint());
}
return state;
}
--- a/src/hotspot/share/prims/whitebox.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/prims/whitebox.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1748,7 +1748,7 @@
WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
oop obj_oop = JNIHandles::resolve(obj);
- return (jboolean) obj_oop->mark()->has_monitor();
+ return (jboolean) obj_oop->mark().has_monitor();
WB_END
WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
--- a/src/hotspot/share/runtime/arguments.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/arguments.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -553,6 +553,8 @@
{ "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "FailOverToOldVerifier", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
+ { "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
// These entries will generate build errors. Their purpose is to test the macros.
@@ -1968,13 +1970,6 @@
return JNI_EINVAL;
}
- // Get around early Solaris scheduling bug
- // (affinity vs other jobs on system)
- // but disallow DR and offlining (5008695).
- if (FLAG_SET_CMDLINE(BindGCTaskThreadsToCPUs, true) != JVMFlag::SUCCESS) {
- return JNI_EINVAL;
- }
-
return JNI_OK;
}
@@ -2872,7 +2867,7 @@
if (FLAG_SET_CMDLINE(AlwaysTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(MaxTenuringThreshold, markWord::max_age + 1) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:+AlwaysTenure")) {
--- a/src/hotspot/share/runtime/basicLock.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/basicLock.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -28,9 +28,9 @@
void BasicLock::print_on(outputStream* st) const {
st->print("monitor");
- markOop moop = displaced_header();
- if (moop != NULL)
- moop->print_on(st);
+ markWord mark_word = displaced_header();
+ if (mark_word.value() != 0)
+ mark_word.print_on(st);
}
void BasicLock::move_to(oop obj, BasicLock* dest) {
@@ -62,7 +62,7 @@
// is small (given the support for inflated fast-path locking in the fast_lock, etc)
// we'll leave that optimization for another time.
- if (displaced_header()->is_neutral()) {
+ if (displaced_header().is_neutral()) {
ObjectSynchronizer::inflate_helper(obj);
// WARNING: We cannot put a check here, because the inflation
// will not update the displaced header. Once BasicLock is inflated,
--- a/src/hotspot/share/runtime/basicLock.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/basicLock.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -26,16 +26,22 @@
#define SHARE_RUNTIME_BASICLOCK_HPP
#include "oops/markOop.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
class BasicLock {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
- volatile markOop _displaced_header;
+ volatile markWord _displaced_header;
public:
- markOop displaced_header() const { return _displaced_header; }
- void set_displaced_header(markOop header) { _displaced_header = header; }
+ markWord displaced_header() const {
+ return Atomic::load(&_displaced_header);
+ }
+
+ void set_displaced_header(markWord header) {
+ Atomic::store(header, &_displaced_header);
+ }
void print_on(outputStream* st) const;
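Because markWord is a value type rather than a pointer, the displaced-header field can no longer rely on implicit volatile-pointer semantics; the accessors above therefore go through Atomic::load/Atomic::store. A minimal standalone sketch of the same idea using std::atomic over the raw word value (FakeBasicLock and mark_t are illustrative, not HotSpot types):

    #include <atomic>
    #include <cstdint>

    using mark_t = uintptr_t;   // stand-in for markWord::value()

    class FakeBasicLock {
      std::atomic<mark_t> _displaced_header{0};
    public:
      mark_t displaced_header() const { return _displaced_header.load(std::memory_order_relaxed); }
      void set_displaced_header(mark_t m) { _displaced_header.store(m, std::memory_order_relaxed); }
    };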
--- a/src/hotspot/share/runtime/biasedLocking.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/biasedLocking.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -45,11 +45,11 @@
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;
-static GrowableArray<Handle>* _preserved_oop_stack = NULL;
-static GrowableArray<markOop>* _preserved_mark_stack = NULL;
+static GrowableArray<Handle>* _preserved_oop_stack = NULL;
+static GrowableArray<markWord>* _preserved_mark_stack = NULL;
static void enable_biased_locking(InstanceKlass* k) {
- k->set_prototype_header(markOopDesc::biased_locking_prototype());
+ k->set_prototype_header(markWord::biased_locking_prototype());
}
static void enable_biased_locking() {
@@ -161,24 +161,24 @@
assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
assert(Thread::current()->is_VM_thread(), "must be VMThread");
- markOop mark = obj->mark();
- if (!mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (!mark.has_bias_pattern()) {
if (log_is_enabled(Info, biasedlocking)) {
ResourceMark rm;
log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT
", mark " INTPTR_FORMAT ", type %s"
", requesting thread " INTPTR_FORMAT
" because it's no longer biased)",
- p2i((void *)obj), (intptr_t) mark,
+ p2i((void *)obj), mark.value(),
obj->klass()->external_name(),
(intptr_t) requesting_thread);
}
return NOT_BIASED;
}
- uint age = mark->age();
- markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
+ uint age = mark.age();
+ markWord biased_prototype = markWord::biased_locking_prototype().set_age(age);
+ markWord unbiased_prototype = markWord::prototype().set_age(age);
// Log at "info" level if not bulk, else "trace" level
if (!is_bulk) {
@@ -187,9 +187,9 @@
INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
", allow rebias %d, requesting thread " INTPTR_FORMAT,
p2i((void *)obj),
- (intptr_t) mark,
+ mark.value(),
obj->klass()->external_name(),
- (intptr_t) obj->klass()->prototype_header(),
+ obj->klass()->prototype_header().value(),
(allow_rebias ? 1 : 0),
(intptr_t) requesting_thread);
} else {
@@ -198,14 +198,14 @@
INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
" , allow rebias %d , requesting thread " INTPTR_FORMAT,
p2i((void *)obj),
- (intptr_t) mark,
+ mark.value(),
obj->klass()->external_name(),
- (intptr_t) obj->klass()->prototype_header(),
+ obj->klass()->prototype_header().value(),
(allow_rebias ? 1 : 0),
(intptr_t) requesting_thread);
}
- JavaThread* biased_thread = mark->biased_locker();
+ JavaThread* biased_thread = mark.biased_locker();
if (biased_thread == NULL) {
// Object is anonymously biased. We can get here if, for
// example, we revoke the bias due to an identity hash code
@@ -270,7 +270,7 @@
p2i((void *) mon_info->owner()),
p2i((void *) obj));
// Assume recursive case and fix up highest lock below
- markOop mark = markOopDesc::encode((BasicLock*) NULL);
+ markWord mark = markWord::encode((BasicLock*) NULL);
highest_lock = mon_info->lock();
highest_lock->set_displaced_header(mark);
} else {
@@ -286,8 +286,8 @@
// Reset object header to point to displaced mark.
// Must release store the lock address for platforms without TSO
// ordering (e.g. ppc).
- obj->release_set_mark(markOopDesc::encode(highest_lock));
- assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+ obj->release_set_mark(markWord::encode(highest_lock));
+ assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
// Log at "info" level if not bulk, else "trace" level
if (!is_bulk) {
log_info(biasedlocking)(" Revoked bias of currently-locked object");
@@ -327,8 +327,8 @@
static HeuristicsResult update_heuristics(oop o) {
- markOop mark = o->mark();
- if (!mark->has_bias_pattern()) {
+ markWord mark = o->mark();
+ if (!mark.has_bias_pattern()) {
return HR_NOT_BIASED;
}
@@ -390,7 +390,7 @@
INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
(bulk_rebias ? "rebias" : "revoke"),
p2i((void *) o),
- (intptr_t) o->mark(),
+ o->mark().value(),
o->klass()->external_name());
jlong cur_time = os::javaTimeMillis();
@@ -413,10 +413,10 @@
// try to update the epoch -- assume another VM operation came in
// and reset the header to the unbiased state, which will
// implicitly cause all existing biases to be revoked
- if (klass->prototype_header()->has_bias_pattern()) {
- int prev_epoch = klass->prototype_header()->bias_epoch();
- klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
- int cur_epoch = klass->prototype_header()->bias_epoch();
+ if (klass->prototype_header().has_bias_pattern()) {
+ int prev_epoch = klass->prototype_header().bias_epoch();
+ klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
+ int cur_epoch = klass->prototype_header().bias_epoch();
// Now walk all threads' stacks and adjust epochs of any biased
// and locked objects of this data type we encounter
@@ -425,11 +425,11 @@
for (int i = 0; i < cached_monitor_info->length(); i++) {
MonitorInfo* mon_info = cached_monitor_info->at(i);
oop owner = mon_info->owner();
- markOop mark = owner->mark();
- if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
// We might have encountered this object already in the case of recursive locking
- assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
- owner->set_mark(mark->set_bias_epoch(cur_epoch));
+ assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+ owner->set_mark(mark.set_bias_epoch(cur_epoch));
}
}
}
@@ -437,7 +437,7 @@
// At this point we're done. All we have to do is potentially
// adjust the header of the given object to revoke its bias.
- single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
+ single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header().has_bias_pattern(), true, requesting_thread, NULL);
} else {
if (log_is_enabled(Info, biasedlocking)) {
ResourceMark rm;
@@ -448,7 +448,7 @@
// cause future instances to not be biased, but existing biased
// instances will notice that this implicitly caused their biases
// to be revoked.
- klass->set_prototype_header(markOopDesc::prototype());
+ klass->set_prototype_header(markWord::prototype());
// Now walk all threads' stacks and forcibly revoke the biases of
// any locked and biased objects of this data type we encounter.
@@ -457,8 +457,8 @@
for (int i = 0; i < cached_monitor_info->length(); i++) {
MonitorInfo* mon_info = cached_monitor_info->at(i);
oop owner = mon_info->owner();
- markOop mark = owner->mark();
- if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
single_revoke_at_safepoint(owner, false, true, requesting_thread, NULL);
}
}
@@ -475,17 +475,17 @@
BiasedLocking::Condition status_code = BIAS_REVOKED;
if (attempt_rebias_of_object &&
- o->mark()->has_bias_pattern() &&
- klass->prototype_header()->has_bias_pattern()) {
- markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
- klass->prototype_header()->bias_epoch());
+ o->mark().has_bias_pattern() &&
+ klass->prototype_header().has_bias_pattern()) {
+ markWord new_mark = markWord::encode(requesting_thread, o->mark().age(),
+ klass->prototype_header().bias_epoch());
o->set_mark(new_mark);
status_code = BIAS_REVOKED_AND_REBIASED;
log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
}
- assert(!o->mark()->has_bias_pattern() ||
- (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
+ assert(!o->mark().has_bias_pattern() ||
+ (attempt_rebias_of_object && (o->mark().biased_locker() == requesting_thread)),
"bug in bulk bias revocation");
return status_code;
@@ -566,28 +566,28 @@
assert(target == _biased_locker, "Wrong thread");
oop o = _obj();
- markOop mark = o->mark();
+ markWord mark = o->mark();
- if (!mark->has_bias_pattern()) {
+ if (!mark.has_bias_pattern()) {
return;
}
- markOop prototype = o->klass()->prototype_header();
- if (!prototype->has_bias_pattern()) {
+ markWord prototype = o->klass()->prototype_header();
+ if (!prototype.has_bias_pattern()) {
// This object has a stale bias from before the handshake
// was requested. If we fail this race, the object's bias
// has been revoked by another thread so we simply return.
- markOop biased_value = mark;
- mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
- assert(!o->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
+ markWord biased_value = mark;
+ mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
+ assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
if (biased_value == mark) {
_status_code = BiasedLocking::BIAS_REVOKED;
}
return;
}
- if (_biased_locker == mark->biased_locker()) {
- if (mark->bias_epoch() == prototype->bias_epoch()) {
+ if (_biased_locker == mark.biased_locker()) {
+ if (mark.bias_epoch() == prototype.bias_epoch()) {
// Epoch is still valid. This means biaser could be currently
// synchronized on this object. We must walk its stack looking
// for monitor records associated with this object and change
@@ -595,15 +595,15 @@
ResourceMark rm;
BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
_biased_locker->set_cached_monitor_info(NULL);
- assert(!o->mark()->has_bias_pattern(), "invariant");
+ assert(!o->mark().has_bias_pattern(), "invariant");
_biased_locker_id = JFR_THREAD_ID(_biased_locker);
_status_code = BiasedLocking::BIAS_REVOKED;
return;
} else {
- markOop biased_value = mark;
- mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
- if (mark == biased_value || !mark->has_bias_pattern()) {
- assert(!o->mark()->has_bias_pattern(), "should be revoked");
+ markWord biased_value = mark;
+ mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
+ if (mark == biased_value || !mark.has_bias_pattern()) {
+ assert(!o->mark().has_bias_pattern(), "should be revoked");
_status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
return;
}
@@ -675,7 +675,7 @@
if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
post_revocation_event(&event, obj->klass(), &revoke);
}
- assert(!obj->mark()->has_bias_pattern(), "invariant");
+ assert(!obj->mark().has_bias_pattern(), "invariant");
return revoke.status_code();
} else {
// Thread was not alive.
@@ -684,20 +684,20 @@
// on this object.
{
MutexLocker ml(Threads_lock);
- markOop mark = obj->mark();
+ markWord mark = obj->mark();
// Check if somebody else was able to revoke it before biased thread exited.
- if (!mark->has_bias_pattern()) {
+ if (!mark.has_bias_pattern()) {
return NOT_BIASED;
}
ThreadsListHandle tlh;
- markOop prototype = obj->klass()->prototype_header();
- if (!prototype->has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark->biased_locker() &&
- prototype->bias_epoch() == mark->bias_epoch())) {
- obj->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
+ markWord prototype = obj->klass()->prototype_header();
+ if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
+ prototype.bias_epoch() == mark.bias_epoch())) {
+ obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
if (event.should_commit()) {
post_revocation_event(&event, obj->klass(), &revoke);
}
- assert(!obj->mark()->has_bias_pattern(), "bias should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
return BIAS_REVOKED;
}
}
@@ -713,9 +713,9 @@
"if ThreadLocalHandshakes is enabled this should always be executed outside safepoints");
assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");
- markOop mark = obj->mark();
- assert(mark->biased_locker() == biased_locker &&
- obj->klass()->prototype_header()->bias_epoch() == mark->bias_epoch(), "invariant");
+ markWord mark = obj->mark();
+ assert(mark.biased_locker() == biased_locker &&
+ obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");
log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
@@ -723,13 +723,13 @@
Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
p2i(Thread::current()),
p2i(obj),
- p2i(mark),
+ mark.value(),
obj->klass()->external_name(),
- p2i(obj->klass()->prototype_header()),
+ obj->klass()->prototype_header().value(),
p2i(biased_locker),
Thread::current()->is_VM_thread() ? "" : "(walking own stack)");
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(obj->mark()->age());
+ markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());
GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
BasicLock* highest_lock = NULL;
@@ -740,7 +740,7 @@
p2i(mon_info->owner()),
p2i(obj));
// Assume recursive case and fix up highest lock below
- markOop mark = markOopDesc::encode((BasicLock*) NULL);
+ markWord mark = markWord::encode((BasicLock*) NULL);
highest_lock = mon_info->lock();
highest_lock->set_displaced_header(mark);
} else {
@@ -756,8 +756,8 @@
// Reset object header to point to displaced mark.
// Must release store the lock address for platforms without TSO
// ordering (e.g. ppc).
- obj->release_set_mark(markOopDesc::encode(highest_lock));
- assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+ obj->release_set_mark(markWord::encode(highest_lock));
+ assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
log_info(biasedlocking)(" Revoked bias of currently-locked object");
} else {
log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
@@ -765,7 +765,7 @@
obj->set_mark(unbiased_prototype);
}
- assert(!obj->mark()->has_bias_pattern(), "must not be biased");
+ assert(!obj->mark().has_bias_pattern(), "must not be biased");
}
@@ -777,35 +777,35 @@
// efficiently enough that we should not cause these revocations to
// update the heuristics because doing so may cause unwanted bulk
// revocations (which are expensive) to occur.
- markOop mark = obj->mark();
- if (mark->is_biased_anonymously() && !attempt_rebias) {
+ markWord mark = obj->mark();
+ if (mark.is_biased_anonymously() && !attempt_rebias) {
// We are probably trying to revoke the bias of this object due to
// an identity hash code computation. Try to revoke the bias
// without a safepoint. This is possible if we can successfully
// compare-and-exchange an unbiased header into the mark word of
// the object, meaning that no other thread has raced to acquire
// the bias of the object.
- markOop biased_value = mark;
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
- markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
+ markWord biased_value = mark;
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
+ markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED;
}
mark = res_mark; // Refresh mark with the latest value.
- } else if (mark->has_bias_pattern()) {
+ } else if (mark.has_bias_pattern()) {
Klass* k = obj->klass();
- markOop prototype_header = k->prototype_header();
- if (!prototype_header->has_bias_pattern()) {
+ markWord prototype_header = k->prototype_header();
+ if (!prototype_header.has_bias_pattern()) {
// This object has a stale bias from before the bulk revocation
// for this data type occurred. It's pointless to update the
// heuristics at this point so simply update the header with a
// CAS. If we fail this race, the object's bias has been revoked
// by another thread so we simply return and let the caller deal
// with it.
- obj->cas_set_mark(prototype_header->set_age(mark->age()), mark);
- assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
+ obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
+ assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
return BIAS_REVOKED;
- } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
+ } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
// The epoch of this biasing has expired indicating that the
// object is effectively unbiased. Depending on whether we need
// to rebias or revoke the bias of this object we can do it
@@ -813,18 +813,18 @@
// heuristics. This is normally done in the assembly code but we
// can reach this point due to various points in the runtime
// needing to revoke biases.
- markOop res_mark;
+ markWord res_mark;
if (attempt_rebias) {
assert(THREAD->is_Java_thread(), "");
- markOop biased_value = mark;
- markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
+ markWord biased_value = mark;
+ markWord rebiased_prototype = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
res_mark = obj->cas_set_mark(rebiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED_AND_REBIASED;
}
} else {
- markOop biased_value = mark;
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markWord biased_value = mark;
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
res_mark = obj->cas_set_mark(unbiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED;
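The revocation paths above share one pattern: build a replacement header from the unbiased (or rebiased) prototype while preserving the object's age bits, then CAS it over the observed mark; if the CAS loses, another thread already revoked or rebiased. A simplified standalone model of that CAS step, with the mark word as a raw integer and a made-up bit layout (these masks are illustrative only, not the real markWord encoding):

    #include <atomic>
    #include <cstdint>

    using mark_t = uint64_t;
    constexpr mark_t AGE_MASK      = 0x78;   // illustrative bit positions only
    constexpr mark_t BIAS_PATTERN  = 0x5;
    constexpr mark_t NEUTRAL_PROTO = 0x1;    // "unlocked" prototype

    // Try to revoke a bias without a safepoint: CAS an unbiased header, keeping
    // the age bits, over the currently observed biased header.
    inline bool try_revoke_bias(std::atomic<mark_t>& mark_word) {
      mark_t observed = mark_word.load();
      if ((observed & BIAS_PATTERN) != BIAS_PATTERN) return true;   // already revoked
      mark_t unbiased = NEUTRAL_PROTO | (observed & AGE_MASK);      // prototype().set_age(age)
      return mark_word.compare_exchange_strong(observed, unbiased); // losing the race is fine
    }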
@@ -838,7 +838,7 @@
if (heuristics == HR_NOT_BIASED) {
return NOT_BIASED;
} else if (heuristics == HR_SINGLE_REVOKE) {
- JavaThread *blt = mark->biased_locker();
+ JavaThread *blt = mark.biased_locker();
assert(blt != NULL, "invariant");
if (blt == THREAD) {
// A thread is trying to revoke the bias of an object biased
@@ -851,7 +851,7 @@
ResourceMark rm;
walk_stack_and_revoke(obj(), blt);
blt->set_cached_monitor_info(NULL);
- assert(!obj->mark()->has_bias_pattern(), "invariant");
+ assert(!obj->mark().has_bias_pattern(), "invariant");
if (event.should_commit()) {
post_self_revocation_event(&event, obj->klass());
}
@@ -883,8 +883,8 @@
bool clean_my_cache = false;
for (int i = 0; i < objs->length(); i++) {
oop obj = (objs->at(i))();
- markOop mark = obj->mark();
- if (mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (mark.has_bias_pattern()) {
walk_stack_and_revoke(obj, biaser);
clean_my_cache = true;
}
@@ -948,7 +948,7 @@
// monitors in a prepass and, if they are biased, preserve their
// mark words here. This should be a relatively small set of objects
// especially compared to the number of objects in the heap.
- _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
+ _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
_preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
ResourceMark rm;
@@ -966,8 +966,8 @@
if (mon_info->owner_is_scalar_replaced()) continue;
oop owner = mon_info->owner();
if (owner != NULL) {
- markOop mark = owner->mark();
- if (mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if (mark.has_bias_pattern()) {
_preserved_oop_stack->push(Handle(cur, owner));
_preserved_mark_stack->push(mark);
}
@@ -990,7 +990,7 @@
int len = _preserved_oop_stack->length();
for (int i = 0; i < len; i++) {
Handle owner = _preserved_oop_stack->at(i);
- markOop mark = _preserved_mark_stack->at(i);
+ markWord mark = _preserved_mark_stack->at(i);
owner->set_mark(mark);
}
--- a/src/hotspot/share/runtime/biasedLocking.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/biasedLocking.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -102,7 +102,7 @@
// was used in a prior version of this algorithm and did not scale
// well). If too many bias revocations persist, biasing is completely
// disabled for the data type by resetting the prototype header to the
-// unbiased markOop. The fast-path locking code checks to see whether
+// unbiased markWord. The fast-path locking code checks to see whether
// the instance's bias pattern differs from the prototype header's and
// causes the bias to be revoked without reaching a safepoint or,
// again, a bulk heap sweep.
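A worked sketch of the fast-path check that comment describes: each class keeps a prototype header, and a biased instance is only trusted while its bias pattern and epoch still match that prototype, so bulk revocation is just a store to the prototype. The bit layout below is hypothetical, chosen only to make the comparison concrete:

    #include <cstdint>

    using mark_t = uint64_t;
    constexpr mark_t BIAS_PATTERN = 0x5;     // illustrative bias bits
    constexpr mark_t EPOCH_MASK   = 0x300;   // illustrative epoch field

    struct FakeKlass { mark_t prototype_header = BIAS_PATTERN; };

    // A biased mark is only honored while it agrees with the class prototype.
    // Resetting the prototype (bulk revocation) or bumping its epoch makes every
    // outstanding bias stale without touching the instances themselves.
    inline bool bias_is_valid(mark_t mark, const FakeKlass& k) {
      if ((k.prototype_header & BIAS_PATTERN) != BIAS_PATTERN) return false;  // biasing disabled for type
      return (mark & BIAS_PATTERN) == BIAS_PATTERN &&
             (mark & EPOCH_MASK) == (k.prototype_header & EPOCH_MASK);        // epoch still current
    }

    inline void bulk_revoke(FakeKlass& k) { k.prototype_header = 0x1; }       // reset to unbiased prototype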
--- a/src/hotspot/share/runtime/deoptimization.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/deoptimization.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1252,15 +1252,15 @@
assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
if (!mon_info->owner_is_scalar_replaced()) {
Handle obj(thread, mon_info->owner());
- markOop mark = obj->mark();
- if (UseBiasedLocking && mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (UseBiasedLocking && mark.has_bias_pattern()) {
// New allocated objects may have the mark set to anonymously biased.
// Also the deoptimized method may have called methods with synchronization
// where the thread-local object is bias locked to the current thread.
- assert(mark->is_biased_anonymously() ||
- mark->biased_locker() == thread, "should be locked to current thread");
+ assert(mark.is_biased_anonymously() ||
+ mark.biased_locker() == thread, "should be locked to current thread");
// Reset mark word to unbiased prototype.
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
obj->set_mark(unbiased_prototype);
}
BasicLock* lock = mon_info->lock();
--- a/src/hotspot/share/runtime/globals.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/globals.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -736,9 +736,6 @@
product(bool, ReduceSignalUsage, false, \
"Reduce the use of OS signals in Java and/or the VM") \
\
- develop_pd(bool, ShareVtableStubs, \
- "Share vtable stubs (smaller code but worse branch prediction") \
- \
develop(bool, LoadLineNumberTables, true, \
"Tell whether the class file parser loads line number tables") \
\
@@ -2348,7 +2345,7 @@
product(uintx, StringDeduplicationAgeThreshold, 3, \
"A string must reach this age (or be promoted to an old region) " \
"to be considered for deduplication") \
- range(1, markOopDesc::max_age) \
+ range(1, markWord::max_age) \
\
diagnostic(bool, StringDeduplicationResizeALot, false, \
"Force table resize every time the table is scanned") \
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -87,13 +87,16 @@
assert(from != _thread_in_native, "use transition_from_native");
assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
assert(thread->thread_state() == from, "coming from wrong thread state");
+
+ // Check NoSafepointVerifier
+ // This also clears unhandled oops if CheckUnhandledOops is used.
+ thread->check_possible_safepoint();
+
// Change to transition state and ensure it is seen by the VM thread.
thread->set_thread_state_fence((JavaThreadState)(from + 1));
SafepointMechanism::block_if_requested(thread);
thread->set_thread_state(to);
-
- CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
}
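The transition above follows a fixed protocol: check that nothing forbids a safepoint, publish an odd "in transition" state with a fence so the VM thread can observe it, block if a safepoint or handshake is pending, then publish the destination state. A compressed standalone model of that ordering using std::atomic (the state values and poll function are stand-ins, not the HotSpot definitions):

    #include <atomic>

    enum FakeState { IN_JAVA = 2, IN_JAVA_TRANS = 3, IN_VM = 4 };  // even = stable, odd = transition

    std::atomic<int>  g_thread_state{IN_JAVA};
    std::atomic<bool> g_safepoint_requested{false};

    void block_if_requested() {               // stand-in for SafepointMechanism::block_if_requested
      while (g_safepoint_requested.load(std::memory_order_acquire)) { /* park until released */ }
    }

    void transition(int from, int to) {
      // 1. Publish the odd transition state so the VM thread sees we may block.
      g_thread_state.store(from + 1, std::memory_order_seq_cst);   // set_thread_state_fence
      // 2. Cooperate with any pending safepoint/handshake before proceeding.
      block_if_requested();
      // 3. Publish the stable destination state.
      g_thread_state.store(to, std::memory_order_release);
    }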
// Same as above, but assumes from = _thread_in_Java. This is simpler, since we
--- a/src/hotspot/share/runtime/jniHandles.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/jniHandles.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -115,8 +115,6 @@
} else {
report_handle_allocation_failure(alloc_failmode, "global");
}
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
}
return res;
@@ -140,8 +138,6 @@
} else {
report_handle_allocation_failure(alloc_failmode, "weak global");
}
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
}
return res;
}
--- a/src/hotspot/share/runtime/mutex.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/mutex.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -50,13 +50,6 @@
void Monitor::lock(Thread * self) {
check_safepoint_state(self, true);
-#ifdef CHECK_UNHANDLED_OOPS
- // Clear unhandled oops in JavaThreads so we get a crash right away.
- if (self->is_active_Java_thread()) {
- self->clear_unhandled_oops();
- }
-#endif // CHECK_UNHANDLED_OOPS
-
DEBUG_ONLY(check_prelock_state(self, true));
assert(_owner != self, "invariant");
@@ -196,11 +189,6 @@
guarantee(self->is_active_Java_thread(), "invariant");
assert_wait_lock_state(self);
-#ifdef CHECK_UNHANDLED_OOPS
- // Clear unhandled oops in JavaThreads so we get a crash right away.
- self->clear_unhandled_oops();
-#endif // CHECK_UNHANDLED_OOPS
-
int wait_status;
// conceptually set the owner to NULL in anticipation of
// abdicating the lock in wait
@@ -244,24 +232,6 @@
return wait_status != 0; // return true IFF timeout
}
-
-// Temporary JVM_RawMonitor* support.
-// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
-// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
-// There's no expectation that JVM_RawMonitors will interoperate properly with the native
-// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
-// native Mutex-Monitors simply as a matter of convenience.
-
-void Monitor::jvm_raw_lock() {
- _lock.lock();
- assert_owner(NULL);
-}
-
-void Monitor::jvm_raw_unlock() {
- assert_owner(NULL);
- _lock.unlock();
-}
-
Monitor::~Monitor() {
assert_owner(NULL);
}
--- a/src/hotspot/share/runtime/mutex.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/mutex.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -52,8 +52,10 @@
// inherently a bit more special than even locks of the 'special' rank.
// NOTE: It is critical that the rank 'special' be the lowest (earliest)
// (except for "event" and "access") for the deadlock detection to work correctly.
- // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
- // which being external to the VM are not subject to deadlock detection.
+ // The rank native was only for use in Mutexes created by JVM_RawMonitorCreate,
+ // which, being external to the VM, are not subject to deadlock detection.
+ // However, it is now also used by other locks that don't fit into the
+ // deadlock detection scheme.
// While at a safepoint no mutexes of rank safepoint are held by any thread.
// The rank named "leaf" is probably historical (and should
// be changed) -- mutexes of this rank aren't really leaf mutexes
@@ -174,10 +176,6 @@
Thread* owner() const { return _owner; }
bool owned_by_self() const;
- // Support for JVM_RawMonitorEnter & JVM_RawMonitorExit. These can be called by
- // non-Java thread. (We should really have a RawMonitor abstraction)
- void jvm_raw_lock();
- void jvm_raw_unlock();
const char *name() const { return _name; }
void print_on_error(outputStream* st) const;
--- a/src/hotspot/share/runtime/mutexLocker.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/mutexLocker.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -126,8 +126,6 @@
Mutex* OldSets_lock = NULL;
Monitor* RootRegionScan_lock = NULL;
-Monitor* GCTaskManager_lock = NULL;
-
Mutex* Management_lock = NULL;
Monitor* Service_lock = NULL;
Monitor* PeriodicTask_lock = NULL;
--- a/src/hotspot/share/runtime/objectMonitor.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/objectMonitor.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -277,10 +277,10 @@
assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
_recursions);
- assert(((oop)object())->mark() == markOopDesc::encode(this),
+ assert(((oop)object())->mark() == markWord::encode(this),
"object mark must match encoded this: mark=" INTPTR_FORMAT
- ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
- p2i(markOopDesc::encode(this)));
+ ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
+ markWord::encode(this).value());
Self->_Stalled = 0;
return;
}
@@ -365,7 +365,7 @@
assert(_recursions == 0, "invariant");
assert(_owner == Self, "invariant");
assert(_succ != Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
@@ -593,7 +593,7 @@
assert(_owner == Self, "invariant");
assert(object() != NULL, "invariant");
// I'd like to write:
- // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ // guarantee (((oop)(object()))->mark() == markWord::encode(this), "invariant") ;
// but as we're at a safepoint that's not safe.
UnlinkAfterAcquire(Self, &node);
@@ -661,7 +661,7 @@
assert(SelfNode != NULL, "invariant");
assert(SelfNode->_thread == Self, "invariant");
assert(_waiters > 0, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
JavaThread * jt = (JavaThread *) Self;
@@ -729,7 +729,7 @@
// In addition, Self.TState is stable.
assert(_owner == Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
UnlinkAfterAcquire(Self, SelfNode);
if (_succ == Self) _succ = NULL;
assert(_succ != Self, "invariant");
@@ -1395,7 +1395,7 @@
// Verify a few postconditions
assert(_owner == Self, "invariant");
assert(_succ != Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
// check if the notification happened
if (!WasNotified) {
@@ -1935,7 +1935,7 @@
}
void ObjectMonitor::print_on(outputStream* st) const {
- // The minimal things to print for markOop printing, more can be added for debugging and logging.
+ // The minimal things to print for markWord printing, more can be added for debugging and logging.
st->print("{contentions=0x%08x,waiters=0x%08x"
",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
contentions(), waiters(), recursions(),
--- a/src/hotspot/share/runtime/objectMonitor.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/objectMonitor.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
+#include "oops/markOop.hpp"
#include "runtime/os.hpp"
#include "runtime/park.hpp"
#include "runtime/perfData.hpp"
@@ -74,7 +75,7 @@
// ObjectMonitor Layout Overview/Highlights/Restrictions:
//
// - The _header field must be at offset 0 because the displaced header
-// from markOop is stored there. We do not want markOop.hpp to include
+// from markWord is stored there. We do not want markOop.hpp to include
// ObjectMonitor.hpp to avoid exposing ObjectMonitor everywhere. This
// means that ObjectMonitor cannot inherit from any other class nor can
// it use any virtual member functions. This restriction is critical to
@@ -141,13 +142,13 @@
friend class VMStructs;
JVMCI_ONLY(friend class JVMCIVMStructs;)
- volatile markOop _header; // displaced object header word - mark
+ volatile markWord _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
public:
ObjectMonitor* FreeNext; // Free list linkage
private:
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
- sizeof(volatile markOop) + sizeof(void * volatile) +
+ sizeof(volatile markWord) + sizeof(void * volatile) +
sizeof(ObjectMonitor *));
protected: // protected for JvmtiRawMonitor
void * volatile _owner; // pointer to owning thread OR BasicLock
@@ -213,7 +214,7 @@
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
- // ObjectMonitor references can be ORed with markOopDesc::monitor_value
+ // ObjectMonitor references can be ORed with markWord::monitor_value
// as part of the ObjectMonitor tagging mechanism. When we combine an
// ObjectMonitor reference with an offset, we need to remove the tag
// value in order to generate the proper address.
@@ -225,11 +226,11 @@
// to the ObjectMonitor reference manipulation code:
//
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
- ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
+ ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)
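The arithmetic behind that macro: a mark word that encodes an inflated lock holds the ObjectMonitor address with monitor_value OR'ed into its low bits, so a field address computed from such a tagged reference must subtract the tag again. A tiny standalone illustration; the tag value 2 mirrors markWord::monitor_value, while the field offset is made up:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr uintptr_t monitor_value = 2;    // low-bit tag marking "this mark word holds a monitor"
      constexpr uintptr_t field_offset  = 16;   // hypothetical offset of some ObjectMonitor field

      alignas(8) unsigned char monitor_storage[64] = {};   // stand-in for an ObjectMonitor
      uintptr_t monitor = reinterpret_cast<uintptr_t>(monitor_storage);
      uintptr_t tagged  = monitor | monitor_value;          // what an inflated mark word stores

      // OM_OFFSET_NO_MONITOR_VALUE_TAG(f) == field_offset - monitor_value:
      // adding it to the tagged reference lands on the untagged field address.
      assert(tagged + (field_offset - monitor_value) == monitor + field_offset);
      return 0;
    }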
- markOop header() const;
- volatile markOop* header_addr();
- void set_header(markOop hdr);
+ markWord header() const;
+ volatile markWord* header_addr();
+ void set_header(markWord hdr);
intptr_t is_busy() const {
// TODO-FIXME: assert _owner == null implies _recursions = 0
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -25,6 +25,8 @@
#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
+#include "runtime/atomic.hpp"
+
inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
return 1;
@@ -32,17 +34,17 @@
return 0;
}
-inline markOop ObjectMonitor::header() const {
- return _header;
+inline markWord ObjectMonitor::header() const {
+ return Atomic::load(&_header);
}
-inline volatile markOop* ObjectMonitor::header_addr() {
+inline volatile markWord* ObjectMonitor::header_addr() {
assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
return &_header;
}
-inline void ObjectMonitor::set_header(markOop hdr) {
- _header = hdr;
+inline void ObjectMonitor::set_header(markWord hdr) {
+ Atomic::store(hdr, &_header);
}
inline jint ObjectMonitor::waiters() const {
@@ -54,14 +56,14 @@
}
inline void ObjectMonitor::clear() {
- assert(_header != NULL, "must be non-NULL");
+ assert(Atomic::load(&_header).value() != 0, "must be non-zero");
assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
assert(_object != NULL, "must be non-NULL");
assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
- _header = NULL;
+ Atomic::store(markWord::zero(), &_header);
_object = NULL;
}
--- a/src/hotspot/share/runtime/os.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/os.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1082,6 +1082,7 @@
} else {
st->print(INTPTR_FORMAT " is pointing into object: " , p2i(addr));
}
+ ResourceMark rm;
o->print_on(st);
return;
}
--- a/src/hotspot/share/runtime/park.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/park.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
ParkEvent * volatile ParkEvent::FreeList = NULL ;
ParkEvent * ParkEvent::Allocate (Thread * t) {
- // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
ParkEvent * ev ;
// Start by trying to recycle an existing but unassociated
@@ -164,4 +163,3 @@
}
Thread::SpinRelease(&ListLock);
}
-
--- a/src/hotspot/share/runtime/safepoint.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/safepoint.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -952,8 +952,7 @@
ThreadSafepointState::ThreadSafepointState(JavaThread *thread)
: _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false),
- _safepoint_id(SafepointSynchronize::InactiveSafepointCounter),
- _orig_thread_state(_thread_uninitialized), _next(NULL) {
+ _safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(NULL) {
}
void ThreadSafepointState::create(JavaThread *thread) {
@@ -990,9 +989,6 @@
return;
}
- // Save the state at the start of safepoint processing.
- _orig_thread_state = stable_state;
-
// Check for a thread that is suspended. Note that thread resume tries
// to grab the Threads_lock which we own here, so a thread cannot be
// resumed during safepoint synchronization.
@@ -1060,8 +1056,6 @@
_thread->print_thread_state_on(st);
}
-void ThreadSafepointState::print() const { print_on(tty); }
-
// ---------------------------------------------------------------------------------------------------------------------
// Block the thread at poll or poll return for safepoint/handshake.
--- a/src/hotspot/share/runtime/safepoint.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/safepoint.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -112,14 +112,6 @@
static long _end_of_last_safepoint; // Time of last safepoint in milliseconds
static julong _coalesced_vmop_count; // coalesced vmop count
- // Statistics
- static void begin_statistics(int nof_threads, int nof_running);
- static void update_statistics_on_spin_end();
- static void update_statistics_on_sync_end(jlong end_time);
- static void update_statistics_on_cleanup_end(jlong end_time);
- static void end_statistics(jlong end_time);
- static void print_statistics();
-
// For debug long safepoint
static void print_safepoint_timeout();
@@ -215,7 +207,6 @@
JavaThread* _thread;
bool _safepoint_safe;
volatile uint64_t _safepoint_id;
- JavaThreadState _orig_thread_state;
ThreadSafepointState* _next;
@@ -241,8 +232,6 @@
void reset_safepoint_id();
void set_safepoint_id(uint64_t sid);
- JavaThreadState orig_thread_state() const { return _orig_thread_state; }
-
// Support for safepoint timeout (debugging)
bool is_at_poll_safepoint() { return _at_poll_safepoint; }
void set_at_poll_safepoint(bool val) { _at_poll_safepoint = val; }
@@ -251,7 +240,6 @@
// debugging
void print_on(outputStream* st) const;
- void print() const;
// Initialize
static void create(JavaThread *thread);
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -3112,10 +3112,10 @@
if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
BasicLock *lock = kptr2->lock();
// Inflate so the displaced header becomes position-independent
- if (lock->displaced_header()->is_unlocked())
+ if (lock->displaced_header().is_unlocked())
ObjectSynchronizer::inflate_helper(kptr2->obj());
// Now the displaced header is free to move
- buf[i++] = (intptr_t)lock->displaced_header();
+ buf[i++] = (intptr_t)lock->displaced_header().value();
buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
}
}
--- a/src/hotspot/share/runtime/stubRoutines.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/stubRoutines.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -129,6 +129,8 @@
address StubRoutines::_aescrypt_decryptBlock = NULL;
address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
+address StubRoutines::_electronicCodeBook_encryptAESCrypt = NULL;
+address StubRoutines::_electronicCodeBook_decryptAESCrypt = NULL;
address StubRoutines::_counterMode_AESCrypt = NULL;
address StubRoutines::_ghash_processBlocks = NULL;
address StubRoutines::_base64_encodeBlock = NULL;
--- a/src/hotspot/share/runtime/stubRoutines.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/stubRoutines.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -210,6 +210,8 @@
static address _aescrypt_decryptBlock;
static address _cipherBlockChaining_encryptAESCrypt;
static address _cipherBlockChaining_decryptAESCrypt;
+ static address _electronicCodeBook_encryptAESCrypt;
+ static address _electronicCodeBook_decryptAESCrypt;
static address _counterMode_AESCrypt;
static address _ghash_processBlocks;
static address _base64_encodeBlock;
@@ -376,6 +378,8 @@
static address aescrypt_decryptBlock() { return _aescrypt_decryptBlock; }
static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; }
static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; }
+ static address electronicCodeBook_encryptAESCrypt() { return _electronicCodeBook_encryptAESCrypt; }
+ static address electronicCodeBook_decryptAESCrypt() { return _electronicCodeBook_decryptAESCrypt; }
static address counterMode_AESCrypt() { return _counterMode_AESCrypt; }
static address ghash_processBlocks() { return _ghash_processBlocks; }
static address base64_encodeBlock() { return _base64_encodeBlock; }
--- a/src/hotspot/share/runtime/synchronizer.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/synchronizer.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -161,16 +161,16 @@
assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
- const markOop mark = obj->mark();
+ const markWord mark = obj->mark();
- if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
+ if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
}
- if (mark->has_monitor()) {
- ObjectMonitor * const mon = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor * const mon = mark.monitor();
assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
if (mon->owner() != self) return false; // slow-path for IMS exception
@@ -211,10 +211,10 @@
assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // Need to throw NPE
- const markOop mark = obj->mark();
+ const markWord mark = obj->mark();
- if (mark->has_monitor()) {
- ObjectMonitor * const m = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor * const m = mark.monitor();
assert(oopDesc::equals((oop) m->object(), obj), "invariant");
Thread * const owner = (Thread *) m->_owner;
@@ -238,7 +238,7 @@
// stack-locking in the object's header, the third check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
- lock->set_displaced_header(markOopDesc::unused_mark());
+ lock->set_displaced_header(markWord::unused_mark());
if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
assert(m->_recursions == 0, "invariant");
@@ -275,31 +275,31 @@
assert(!attempt_rebias, "can not rebias toward VM thread");
BiasedLocking::revoke_at_safepoint(obj);
}
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
slow_enter(obj, lock, THREAD);
}
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
- markOop mark = object->mark();
+ markWord mark = object->mark();
// We cannot check for Biased Locking if we are racing an inflation.
- assert(mark == markOopDesc::INFLATING() ||
- !mark->has_bias_pattern(), "should not see bias pattern here");
+ assert(mark == markWord::INFLATING() ||
+ !mark.has_bias_pattern(), "should not see bias pattern here");
- markOop dhw = lock->displaced_header();
- if (dhw == NULL) {
+ markWord dhw = lock->displaced_header();
+ if (dhw.value() == 0) {
// If the displaced header is NULL, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
- if (mark != markOopDesc::INFLATING()) {
+ if (mark != markWord::INFLATING()) {
// Only do diagnostics if we are not racing an inflation. Simply
// exiting a recursive enter of a Java Monitor that is being
// inflated is safe; see the has_monitor() comment below.
- assert(!mark->is_neutral(), "invariant");
- assert(!mark->has_locker() ||
- THREAD->is_lock_owned((address)mark->locker()), "invariant");
- if (mark->has_monitor()) {
+ assert(!mark.is_neutral(), "invariant");
+ assert(!mark.has_locker() ||
+ THREAD->is_lock_owned((address)mark.locker()), "invariant");
+ if (mark.has_monitor()) {
// The BasicLock's displaced_header is marked as a recursive
// enter and we have an inflated Java Monitor (ObjectMonitor).
// This is a special case where the Java Monitor was inflated
@@ -308,7 +308,7 @@
// Monitor owner's stack and update the BasicLocks because a
// Java Monitor can be asynchronously inflated by a thread that
// does not own the Java Monitor.
- ObjectMonitor * m = mark->monitor();
+ ObjectMonitor * m = mark.monitor();
assert(((oop)(m->object()))->mark() == mark, "invariant");
assert(m->is_entered(THREAD), "invariant");
}
@@ -317,10 +317,10 @@
return;
}
- if (mark == (markOop) lock) {
+ if (mark == markWord::from_pointer(lock)) {
// If the object is stack-locked by the current thread, try to
// swing the displaced header from the BasicLock back to the mark.
- assert(dhw->is_neutral(), "invariant");
+ assert(dhw.is_neutral(), "invariant");
if (object->cas_set_mark(dhw, mark) == mark) {
return;
}
@@ -336,22 +336,22 @@
// We don't need to use fast path here, because it must have been
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
- markOop mark = obj->mark();
- assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+ markWord mark = obj->mark();
+ assert(!mark.has_bias_pattern(), "should not see bias pattern here");
- if (mark->is_neutral()) {
+ if (mark.is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
- if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
+ if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
return;
}
// Fall through to inflate() ...
- } else if (mark->has_locker() &&
- THREAD->is_lock_owned((address)mark->locker())) {
- assert(lock != mark->locker(), "must not re-lock the same lock");
- assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
- lock->set_displaced_header(NULL);
+ } else if (mark.has_locker() &&
+ THREAD->is_lock_owned((address)mark.locker())) {
+ assert(lock != mark.locker(), "must not re-lock the same lock");
+ assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
+ lock->set_displaced_header(markWord::from_pointer(NULL));
return;
}
@@ -359,7 +359,7 @@
// so it does not matter what the value is, except that it
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
- lock->set_displaced_header(markOopDesc::unused_mark());
+ lock->set_displaced_header(markWord::unused_mark());
inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}
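Taken together, slow_enter and fast_exit implement the classic displaced-header stack lock: on enter, the neutral mark is saved into the on-stack BasicLock and the lock's address is CAS'ed into the mark word, with a zero displaced header marking a recursive enter; on exit, a recursive enter needs no work, otherwise the displaced mark is CAS'ed back. A condensed standalone model with the mark word as a raw atomic word (layout and names are illustrative only):

    #include <atomic>
    #include <cstdint>

    using mark_t = uintptr_t;
    constexpr mark_t UNLOCKED = 0x1;                       // neutral mark, like markWord::prototype()

    struct FakeLock { mark_t displaced = 0; };             // lives in the caller's stack frame

    // Returns false when the fast paths do not apply and inflation would be needed.
    // owned_by_self stands in for THREAD->is_lock_owned((address)mark.locker()).
    inline bool stack_lock_enter(std::atomic<mark_t>& mark, FakeLock* lock, bool owned_by_self) {
      mark_t observed = mark.load();
      if (observed == UNLOCKED) {                          // mark.is_neutral()
        lock->displaced = observed;                        // save the header in the BasicLock
        return mark.compare_exchange_strong(observed, reinterpret_cast<mark_t>(lock));
      }
      if (owned_by_self) {                                 // recursive stack lock by the same thread
        lock->displaced = 0;                               // zero displaced header == recursive enter
        return true;
      }
      return false;                                        // contended or inflated: caller must inflate
    }

    inline bool stack_lock_exit(std::atomic<mark_t>& mark, FakeLock* lock) {
      if (lock->displaced == 0) return true;               // recursive enter: nothing to undo
      mark_t expected = reinterpret_cast<mark_t>(lock);
      return mark.compare_exchange_strong(expected, lock->displaced);  // swing the header back
    }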
@@ -386,7 +386,7 @@
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
@@ -398,7 +398,7 @@
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
@@ -412,7 +412,7 @@
// the current locking is from JNI instead of Java code
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
THREAD->set_current_pending_monitor_is_from_java(false);
inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
@@ -426,7 +426,7 @@
BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
obj = h_obj();
}
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
// If this thread has locked the object, exit the monitor. We
@@ -464,7 +464,7 @@
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
@@ -484,7 +484,7 @@
void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
@@ -495,11 +495,11 @@
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
- markOop mark = obj->mark();
- if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ markWord mark = obj->mark();
+ if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
return;
}
inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
@@ -509,11 +509,11 @@
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
- markOop mark = obj->mark();
- if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ markWord mark = obj->mark();
+ if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
return;
}
inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
@@ -556,16 +556,16 @@
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
-static markOop ReadStableMark(oop obj) {
- markOop mark = obj->mark();
- if (!mark->is_being_inflated()) {
+static markWord ReadStableMark(oop obj) {
+ markWord mark = obj->mark();
+ if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
int its = 0;
for (;;) {
- markOop mark = obj->mark();
- if (!mark->is_being_inflated()) {
+ markWord mark = obj->mark();
+ if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
@@ -604,7 +604,7 @@
assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
- while (obj->mark() == markOopDesc::INFLATING()) {
+ while (obj->mark() == markWord::INFLATING()) {
// Beware: NakedYield() is advisory and has almost no effect on some platforms
// so we periodically call Self->_ParkEvent->park(1).
// We use a mixed spin/yield/block mechanism.
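ReadStableMark above returns immediately when the header is stable and otherwise spins, yields and eventually parks until the transient INFLATING value disappears. A minimal standalone sketch of that retry-until-stable idiom (not VM code; an AtomicLong stands in for the object header and 0L for markWord::INFLATING()):

    import java.util.concurrent.atomic.AtomicLong;

    class ReadStableMarkSketch {
        static final long INFLATING = 0L;   // stand-in for markWord::INFLATING()

        // Spin (with a yield) until the observed header is no longer the
        // transient "inflation in progress" sentinel.
        static long readStableMark(AtomicLong header) {
            long mark = header.get();
            if (mark != INFLATING) {
                return mark;                // fast path: header is stable
            }
            for (;;) {
                mark = header.get();
                if (mark != INFLATING) {
                    return mark;
                }
                Thread.yield();             // HotSpot mixes spin, yield and park here
            }
        }
    }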
@@ -673,9 +673,9 @@
value = v;
}
- value &= markOopDesc::hash_mask;
+ value &= markWord::hash_mask;
if (value == 0) value = 0xBAD;
- assert(value != markOopDesc::no_hash, "invariant");
+ assert(value != markWord::no_hash, "invariant");
return value;
}
@@ -688,7 +688,7 @@
// been checked to make sure they can handle a safepoint. The
// added check of the bias pattern is to avoid useless calls to
// thread-local storage.
- if (obj->mark()->has_bias_pattern()) {
+ if (obj->mark().has_bias_pattern()) {
// Handle for oop obj in case of STW safepoint
Handle hobj(Self, obj);
// Relaxing assertion for bug 6320749.
@@ -697,7 +697,7 @@
"biases should not be seen by VM thread here");
BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
obj = hobj();
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
}
@@ -711,20 +711,20 @@
((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
ObjectMonitor* monitor = NULL;
- markOop temp, test;
+ markWord temp, test;
intptr_t hash;
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// object should remain ineligible for biased locking
- assert(!mark->has_bias_pattern(), "invariant");
+ assert(!mark.has_bias_pattern(), "invariant");
- if (mark->is_neutral()) {
- hash = mark->hash(); // this is a normal header
+ if (mark.is_neutral()) {
+ hash = mark.hash(); // this is a normal header
if (hash != 0) { // if it has hash, just return it
return hash;
}
hash = get_next_hash(Self, obj); // allocate a new hash code
- temp = mark->copy_set_hash(hash); // merge the hash code into header
+ temp = mark.copy_set_hash(hash); // merge the hash code into header
// use (machine word version) atomic operation to install the hash
test = obj->cas_set_mark(temp, mark);
if (test == mark) {
@@ -733,20 +733,20 @@
// If atomic operation failed, we must inflate the header
// into heavy weight monitor. We could add more code here
      // for fast path, but it is not worth the complexity.
- } else if (mark->has_monitor()) {
- monitor = mark->monitor();
+ } else if (mark.has_monitor()) {
+ monitor = mark.monitor();
temp = monitor->header();
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- hash = temp->hash();
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ hash = temp.hash();
if (hash != 0) {
return hash;
}
// Skip to the following code to reduce code size
- } else if (Self->is_lock_owned((address)mark->locker())) {
- temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- hash = temp->hash(); // by current thread, check if the displaced
- if (hash != 0) { // header contains hash code
+ } else if (Self->is_lock_owned((address)mark.locker())) {
+ temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ hash = temp.hash(); // by current thread, check if the displaced
+ if (hash != 0) { // header contains hash code
return hash;
}
// WARNING:
@@ -763,19 +763,20 @@
monitor = inflate(Self, obj, inflate_cause_hash_code);
// Load displaced header and check it has hash code
mark = monitor->header();
- assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
- hash = mark->hash();
+ assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+ hash = mark.hash();
if (hash == 0) {
hash = get_next_hash(Self, obj);
- temp = mark->copy_set_hash(hash); // merge hash code into header
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
+ temp = mark.copy_set_hash(hash); // merge hash code into header
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
+ test = markWord(v);
if (test != mark) {
// The only update to the ObjectMonitor's header/dmw field
// is to merge in the hash code. If someone adds a new usage
// of the header/dmw field, please update this code.
- hash = test->hash();
- assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
+ hash = test.hash();
+ assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
assert(hash != 0, "Trivial unexpected object/monitor header usage.");
}
}
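The hash-installation paths above all follow the same idiom: merge the hash into a copy of the header, try to install it with a single CAS, and on failure adopt whatever hash the winning thread installed. A hedged Java sketch of that idiom (field layout, shift and mask values are invented, not HotSpot's):

    import java.util.concurrent.atomic.AtomicLong;

    class HashInstallSketch {
        static final int  HASH_SHIFT = 8;           // invented layout
        static final long HASH_MASK  = 0x7FFFFFFFL; // invented layout

        static long hashOf(long header) {
            return (header >>> HASH_SHIFT) & HASH_MASK;
        }

        // Install a candidate hash with CAS; assumes the only racing update is
        // another hash installation (the VM handles the other cases by inflating).
        static long installHash(AtomicLong header, long candidate) {
            long observed = header.get();
            if (hashOf(observed) != 0) {
                return hashOf(observed);            // already hashed
            }
            long merged = observed | ((candidate & HASH_MASK) << HASH_SHIFT);
            if (header.compareAndSet(observed, merged)) {
                return candidate;                   // we won the race
            }
            return hashOf(header.get());            // adopt the winner's hash
        }
    }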
@@ -794,25 +795,25 @@
Handle h_obj) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(h_obj, false, thread);
- assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
assert(thread == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// Uncontended case, header points to stack
- if (mark->has_locker()) {
- return thread->is_lock_owned((address)mark->locker());
+ if (mark.has_locker()) {
+ return thread->is_lock_owned((address)mark.locker());
}
// Contended case, header points to ObjectMonitor (tagged pointer)
- if (mark->has_monitor()) {
- ObjectMonitor* monitor = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor* monitor = mark.monitor();
return monitor->is_entered(thread) != 0;
}
// Unlocked case, header in place
- assert(mark->is_neutral(), "sanity check");
+ assert(mark.is_neutral(), "sanity check");
return false;
}
@@ -830,35 +831,35 @@
// Possible mark states: neutral, biased, stack-locked, inflated
- if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+ if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
// CASE: biased
BiasedLocking::revoke_and_rebias(h_obj, false, self);
- assert(!h_obj->mark()->has_bias_pattern(),
+ assert(!h_obj->mark().has_bias_pattern(),
"biases should be revoked by now");
}
assert(self == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
- if (mark->has_locker()) {
- return self->is_lock_owned((address)mark->locker()) ?
+ if (mark.has_locker()) {
+ return self->is_lock_owned((address)mark.locker()) ?
owner_self : owner_other;
}
// CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
// The Object:ObjectMonitor relationship is stable as long as we're
// not at a safepoint.
- if (mark->has_monitor()) {
- void * owner = mark->monitor()->_owner;
+ if (mark.has_monitor()) {
+ void * owner = mark.monitor()->_owner;
if (owner == NULL) return owner_none;
return (owner == self ||
self->is_lock_owned((address)owner)) ? owner_self : owner_other;
}
// CASE: neutral
- assert(mark->is_neutral(), "sanity check");
+ assert(mark.is_neutral(), "sanity check");
return owner_none; // it's unlocked
}
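As a compact way to read the three-way answer computed above: the ownership query collapses the mark states into owner_none, owner_self and owner_other. A conceptual sketch (invented names, not VM code):

    class OwnershipSketch {
        enum Ownership { NONE, SELF, OTHER }
        enum LockState { NEUTRAL, STACK_LOCKED, INFLATED }

        // NEUTRAL (or an inflated monitor with no owner) means unlocked;
        // otherwise the answer depends on whether the caller is the owner.
        static Ownership classify(LockState state, boolean hasOwner, boolean ownedBySelf) {
            if (state == LockState.NEUTRAL || !hasOwner) {
                return Ownership.NONE;
            }
            return ownedBySelf ? Ownership.SELF : Ownership.OTHER;
        }
    }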
@@ -870,22 +871,22 @@
} else {
BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
}
- assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
oop obj = h_obj();
address owner = NULL;
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// Uncontended case, header points to stack
- if (mark->has_locker()) {
- owner = (address) mark->locker();
+ if (mark.has_locker()) {
+ owner = (address) mark.locker();
}
// Contended case, header points to ObjectMonitor (tagged pointer)
- else if (mark->has_monitor()) {
- ObjectMonitor* monitor = mark->monitor();
+ else if (mark.has_monitor()) {
+ ObjectMonitor* monitor = mark.monitor();
assert(monitor != NULL, "monitor should be non-null");
owner = (address) monitor->owner();
}
@@ -898,7 +899,7 @@
// Unlocked case, header in place
// Cannot have assertion since this object may have been
// locked by another thread when reaching here.
- // assert(mark->is_neutral(), "sanity check");
+ // assert(mark.is_neutral(), "sanity check");
return NULL;
}
@@ -1165,7 +1166,7 @@
void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
bool fromPerThreadAlloc) {
- guarantee(m->header() == NULL, "invariant");
+ guarantee(m->header().value() == 0, "invariant");
guarantee(m->object() == NULL, "invariant");
stringStream ss;
guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
@@ -1301,10 +1302,10 @@
// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
- markOop mark = obj->mark();
- if (mark->has_monitor()) {
- assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
- assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
+ markWord mark = obj->mark();
+ if (mark.has_monitor()) {
+ assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
+ assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
return;
}
inflate(Thread::current(), obj, inflate_cause_vm_internal);
@@ -1321,8 +1322,8 @@
EventJavaMonitorInflate event;
for (;;) {
- const markOop mark = object->mark();
- assert(!mark->has_bias_pattern(), "invariant");
+ const markWord mark = object->mark();
+ assert(!mark.has_bias_pattern(), "invariant");
// The mark can be in one of the following states:
// * Inflated - just return
@@ -1332,10 +1333,10 @@
// * BIASED - Illegal. We should never see this
// CASE: inflated
- if (mark->has_monitor()) {
- ObjectMonitor * inf = mark->monitor();
- markOop dmw = inf->header();
- assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ if (mark.has_monitor()) {
+ ObjectMonitor * inf = mark.monitor();
+ markWord dmw = inf->header();
+ assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
assert(oopDesc::equals((oop) inf->object(), object), "invariant");
assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
return inf;
@@ -1347,7 +1348,7 @@
// The INFLATING value is transient.
// Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
// We could always eliminate polling by parking the thread on some auxiliary list.
- if (mark == markOopDesc::INFLATING()) {
+ if (mark == markWord::INFLATING()) {
ReadStableMark(object);
continue;
}
@@ -1373,7 +1374,7 @@
LogStreamHandle(Trace, monitorinflation) lsh;
- if (mark->has_locker()) {
+ if (mark.has_locker()) {
ObjectMonitor * m = omAlloc(Self);
// Optimistically prepare the objectmonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
@@ -1382,7 +1383,7 @@
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
- markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
+ markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
if (cmp != mark) {
omRelease(Self, m, true);
continue; // Interference -- just retry
@@ -1410,7 +1411,7 @@
// drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
- // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+ // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
// 0 serves as a "BUSY" inflate-in-progress indicator.
@@ -1418,27 +1419,27 @@
// The owner can't die or unwind past the lock while our INFLATING
// object is in the mark. Furthermore the owner can't complete
// an unlock on the object, either.
- markOop dmw = mark->displaced_mark_helper();
+ markWord dmw = mark.displaced_mark_helper();
// Catch if the object's header is not neutral (not locked and
// not marked is what we care about here).
- assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
// Setup monitor fields to proper values -- prepare the monitor
m->set_header(dmw);
- // Optimization: if the mark->locker stack address is associated
+ // Optimization: if the mark.locker stack address is associated
// with this thread we could simply set m->_owner = Self.
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
- m->set_owner(mark->locker());
+ m->set_owner(mark.locker());
m->set_object(object);
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
// be stable at the time of publishing the monitor address.
- guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
- object->release_set_mark(markOopDesc::encode(m));
+ guarantee(object->mark() == markWord::INFLATING(), "invariant");
+ object->release_set_mark(markWord::encode(m));
// Hopefully the performance counters are allocated on distinct cache lines
// to avoid false sharing on MP systems ...
@@ -1447,7 +1448,7 @@
ResourceMark rm(Self);
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
- p2i(object->mark()), object->klass()->external_name());
+ object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
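The stack-locked path above is a two-phase publish: CAS the header to the transient INFLATING value, copy the displaced header and owner into the monitor while the header still reads as busy, then publish the tagged monitor with a release store. A hedged sketch of the same shape (not VM code; the tagged-pointer value is passed in as a plain long):

    import java.util.concurrent.atomic.AtomicLong;

    class InflateSketch {
        static final long BUSY = 0L;                // stand-in for markWord::INFLATING()

        static final class Monitor {
            long displacedHeader;                   // the saved, neutral header
        }

        // Returns the new monitor, or null if another thread is already
        // inflating (the caller would re-read the header and retry).
        static Monitor inflateOnce(AtomicLong header, long taggedMonitorBits) {
            long observed = header.get();
            if (observed == BUSY) {
                return null;                        // inflation already in progress
            }
            if (!header.compareAndSet(observed, BUSY)) {
                return null;                        // lost the race
            }
            Monitor m = new Monitor();
            m.displacedHeader = observed;           // stable while the header reads BUSY
            header.set(taggedMonitorBits);          // volatile store publishes the monitor
            return m;
        }
    }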
@@ -1467,7 +1468,7 @@
// Catch if the object's header is not neutral (not locked and
// not marked is what we care about here).
- assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
+ assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
ObjectMonitor * m = omAlloc(Self);
// prepare m for installation - set monitor to initial state
m->Recycle();
@@ -1476,8 +1477,8 @@
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
- if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
- m->set_header(NULL);
+ if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
+ m->set_header(markWord::zero());
m->set_object(NULL);
m->Recycle();
omRelease(Self, m, true);
@@ -1495,7 +1496,7 @@
ResourceMark rm(Self);
lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
- p2i(object->mark()), object->klass()->external_name());
+ object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
@@ -1533,15 +1534,15 @@
ObjectMonitor** freeTailp) {
bool deflated;
// Normal case ... The monitor is associated with obj.
- const markOop mark = obj->mark();
- guarantee(mark == markOopDesc::encode(mid), "should match: mark="
- INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
- p2i(markOopDesc::encode(mid)));
- // Make sure that mark->monitor() and markOopDesc::encode() agree:
- guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
- ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
- const markOop dmw = mid->header();
- guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ const markWord mark = obj->mark();
+ guarantee(mark == markWord::encode(mid), "should match: mark="
+ INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
+ markWord::encode(mid).value());
+ // Make sure that mark.monitor() and markWord::encode() agree:
+ guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
+ ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
+ const markWord dmw = mid->header();
+ guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
if (mid->is_busy()) {
deflated = false;
@@ -1554,7 +1555,7 @@
log_trace(monitorinflation)("deflate_monitor: "
"object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(obj),
- p2i(mark), obj->klass()->external_name());
+ mark.value(), obj->klass()->external_name());
}
// Restore the header back to obj
@@ -1935,16 +1936,16 @@
}
*error_cnt_p = *error_cnt_p + 1;
}
- if (n->header() != NULL) {
+ if (n->header().value() != 0) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": free per-thread monitor must have NULL _header "
"field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
- p2i(n->header()));
+ n->header().value());
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
"must have NULL _header field: _header=" INTPTR_FORMAT,
- p2i(n), p2i(n->header()));
+ p2i(n), n->header().value());
}
*error_cnt_p = *error_cnt_p + 1;
}
@@ -2003,7 +2004,7 @@
// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
outputStream * out, int *error_cnt_p) {
- if (n->header() == NULL) {
+ if (n->header().value() == 0) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor must have non-NULL _header "
@@ -2026,34 +2027,34 @@
*error_cnt_p = *error_cnt_p + 1;
}
const oop obj = (oop)n->object();
- const markOop mark = obj->mark();
- if (!mark->has_monitor()) {
+ const markWord mark = obj->mark();
+ if (!mark.has_monitor()) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor's object does not think "
"it has a monitor: obj=" INTPTR_FORMAT ", mark="
- INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), p2i(mark));
+ INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
"monitor's object does not think it has a monitor: obj="
INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
- p2i(obj), p2i(mark));
+ p2i(obj), mark.value());
}
*error_cnt_p = *error_cnt_p + 1;
}
- ObjectMonitor * const obj_mon = mark->monitor();
+ ObjectMonitor * const obj_mon = mark.monitor();
if (n != obj_mon) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor's object does not refer "
"to the same monitor: obj=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
- p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
+ p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
"monitor's object does not refer to the same monitor: obj="
INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
- INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
+ INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
}
*error_cnt_p = *error_cnt_p + 1;
}
@@ -2119,10 +2120,10 @@
out->print_cr("================== === ================== ==================");
for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
const oop obj = (oop) n->object();
- const markOop mark = n->header();
+ const markWord mark = n->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
- n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
+ n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
p2i(obj), obj->klass()->external_name());
if (n->is_busy() != 0) {
out->print(" (%s)", n->is_busy_to_string(&ss));
@@ -2144,11 +2145,11 @@
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
const oop obj = (oop) n->object();
- const markOop mark = n->header();
+ const markWord mark = n->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
" %s", p2i(jt), p2i(n), n->is_busy() != 0,
- mark->hash() != 0, n->owner() != NULL, p2i(obj),
+ mark.hash() != 0, n->owner() != NULL, p2i(obj),
obj->klass()->external_name());
if (n->is_busy() != 0) {
out->print(" (%s)", n->is_busy_to_string(&ss));
--- a/src/hotspot/share/runtime/thread.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/thread.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -173,7 +173,7 @@
// Support for forcing alignment of thread objects for biased locking
void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
if (UseBiasedLocking) {
- const int alignment = markOopDesc::biased_lock_alignment;
+ const int alignment = markWord::biased_lock_alignment;
size_t aligned_size = size + (alignment - sizeof(intptr_t));
void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
: AllocateHeap(aligned_size, flags, CURRENT_PC,
@@ -223,7 +223,6 @@
// stack and get_thread
set_stack_base(NULL);
set_stack_size(0);
- set_self_raw_id(0);
set_lgrp_id(-1);
DEBUG_ONLY(clear_suspendible_thread();)
@@ -302,9 +301,9 @@
#endif // CHECK_UNHANDLED_OOPS
#ifdef ASSERT
if (UseBiasedLocking) {
- assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+ assert((((uintptr_t) this) & (markWord::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
assert(this == _real_malloc_address ||
- this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+ this == align_up(_real_malloc_address, (int)markWord::biased_lock_alignment),
"bug in forced alignment of thread objects");
}
#endif // ASSERT
@@ -859,23 +858,6 @@
return true;
}
-#ifndef PRODUCT
-void JavaThread::record_jump(address target, address instr, const char* file,
- int line) {
-
- // This should not need to be atomic as the only way for simultaneous
- // updates is via interrupts. Even then this should be rare or non-existent
- // and we don't care that much anyway.
-
- int index = _jmp_ring_index;
- _jmp_ring_index = (index + 1) & (jump_ring_buffer_size - 1);
- _jmp_ring[index]._target = (intptr_t) target;
- _jmp_ring[index]._instruction = (intptr_t) instr;
- _jmp_ring[index]._file = file;
- _jmp_ring[index]._line = line;
-}
-#endif // PRODUCT
-
void Thread::interrupt(Thread* thread) {
debug_only(check_for_dangling_thread_pointer(thread);)
os::interrupt(thread);
@@ -1001,35 +983,32 @@
}
}
-static int ref_use_count = 0;
-
-bool Thread::owns_locks_but_compiled_lock() const {
- for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
- if (cur != Compile_lock) return true;
- }
- return false;
+// Checks safepoint allowed and clears unhandled oops at potential safepoints.
+void Thread::check_possible_safepoint() {
+ if (!is_Java_thread()) return;
+
+ if (_no_safepoint_count > 0) {
+ fatal("Possible safepoint reached by thread that does not allow it");
+ }
+#ifdef CHECK_UNHANDLED_OOPS
+ // Clear unhandled oops in JavaThreads so we get a crash right away.
+ clear_unhandled_oops();
+#endif // CHECK_UNHANDLED_OOPS
}
-
-#endif
-
-#ifndef PRODUCT
-
 // The flag potential_vm_operation indicates whether this particular safepoint state could
 // potentially invoke the VM thread (e.g., for an oop allocation). In that case, we also have
 // to make sure that no locks marked allow_vm_block are held.
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
- // Check if current thread is allowed to block at a safepoint
- if (_no_safepoint_count > 0) {
- fatal("Possible safepoint reached by thread that does not allow it");
- }
- if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+ if (!is_Java_thread()) return;
+
+ check_possible_safepoint();
+
+ if (((JavaThread*)this)->thread_state() != _thread_in_vm) {
fatal("LEAF method calling lock?");
}
-#ifdef ASSERT
- if (potential_vm_operation && is_Java_thread()
- && !Universe::is_bootstrapping()) {
+ if (potential_vm_operation && !Universe::is_bootstrapping()) {
// Make sure we do not hold any locks that the VM thread also uses.
// This could potentially lead to deadlocks
for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
@@ -1052,9 +1031,8 @@
// We could enter a safepoint here and thus have a gc
InterfaceSupport::check_gc_alot();
}
-#endif
}
-#endif
+#endif // ASSERT
bool Thread::is_in_stack(address adr) const {
assert(Thread::current() == this, "is_in_stack can only be called from current thread");
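check_possible_safepoint above is the debug-only guard that fires when code reaches a potential safepoint inside a scope that forbids one (NoSafepointVerifier bumps _no_safepoint_count). A rough analogue of the idea in plain Java, using a thread-local depth counter (invented names, not VM code):

    class NoSafepointScopeSketch {
        // Depth of "no safepoint allowed" scopes for the current thread.
        static final ThreadLocal<Integer> DEPTH = ThreadLocal.withInitial(() -> 0);

        static void enterScope() { DEPTH.set(DEPTH.get() + 1); }
        static void exitScope()  { DEPTH.set(DEPTH.get() - 1); }

        // Called from any operation that might reach a "safepoint".
        static void checkPossibleSafepoint() {
            if (DEPTH.get() > 0) {
                throw new IllegalStateException(
                    "possible safepoint reached inside a no-safepoint scope");
            }
        }
    }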
@@ -1683,7 +1661,6 @@
set_deferred_locals(NULL);
set_deopt_mark(NULL);
set_deopt_compiled_method(NULL);
- clear_must_deopt_id();
set_monitor_chunks(NULL);
_on_thread_list = false;
set_thread_state(_thread_new);
@@ -1718,20 +1695,12 @@
_pending_async_exception = NULL;
_thread_stat = NULL;
_thread_stat = new ThreadStatistics();
- _blocked_on_compilation = false;
_jni_active_critical = 0;
_pending_jni_exception_check_fn = NULL;
_do_not_unlock_if_synchronized = false;
_cached_monitor_info = NULL;
_parker = Parker::Allocate(this);
-#ifndef PRODUCT
- _jmp_ring_index = 0;
- for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
- record_jump(NULL, NULL, NULL, 0);
- }
-#endif // PRODUCT
-
// Setup safepoint state info for this thread
ThreadSafepointState::create(this);
@@ -3043,9 +3012,6 @@
void JavaThread::print_thread_state_on(outputStream *st) const {
st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state));
};
-void JavaThread::print_thread_state() const {
- print_thread_state_on(tty);
-}
#endif // PRODUCT
// Called by Threads::print() for VM_PrintThreads operation
@@ -3166,47 +3132,10 @@
return name_str;
}
-
-const char* JavaThread::get_threadgroup_name() const {
- debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
- oop thread_obj = threadObj();
- if (thread_obj != NULL) {
- oop thread_group = java_lang_Thread::threadGroup(thread_obj);
- if (thread_group != NULL) {
- // ThreadGroup.name can be null
- return java_lang_ThreadGroup::name(thread_group);
- }
- }
- return NULL;
-}
-
-const char* JavaThread::get_parent_name() const {
- debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
- oop thread_obj = threadObj();
- if (thread_obj != NULL) {
- oop thread_group = java_lang_Thread::threadGroup(thread_obj);
- if (thread_group != NULL) {
- oop parent = java_lang_ThreadGroup::parent(thread_group);
- if (parent != NULL) {
- // ThreadGroup.name can be null
- return java_lang_ThreadGroup::name(parent);
- }
- }
- }
- return NULL;
-}
-
-ThreadPriority JavaThread::java_priority() const {
- oop thr_oop = threadObj();
- if (thr_oop == NULL) return NormPriority; // Bootstrapping
- ThreadPriority priority = java_lang_Thread::priority(thr_oop);
- assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
- return priority;
-}
-
void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
+ assert(NoPriority <= prio && prio <= MaxPriority, "sanity check");
// Link Java Thread object <-> C++ Thread
// Get the C++ thread object (an oop) from the JNI handle (a jthread)
@@ -3346,23 +3275,6 @@
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
-
-static void oops_print(frame* f, const RegisterMap *map) {
- PrintAndVerifyOopClosure print;
- f->print_value();
- f->oops_do(&print, NULL, (RegisterMap*)map);
-}
-
-// Print our all the locations that contain oops and whether they are
-// valid or not. This useful when trying to find the oldest frame
-// where an oop has gone bad since the frame walk is from youngest to
-// oldest.
-void JavaThread::trace_oops() {
- tty->print_cr("[Trace oops]");
- frames_do(oops_print);
-}
-
-
#ifdef ASSERT
// Print or validate the layout of stack frames
void JavaThread::print_frame_layout(int depth, bool validate_only) {
--- a/src/hotspot/share/runtime/thread.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/thread.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -83,7 +83,6 @@
class DeoptResourceMark;
class jvmtiDeferredLocalVariableSet;
-class GCTaskQueue;
class ThreadClosure;
class ICRefillVerifier;
class IdealGraphPrinter;
@@ -108,7 +107,6 @@
// - ConcurrentGCThread
// - WorkerThread
// - GangWorker
-// - GCTaskThread
// - WatcherThread
// - JfrThreadSampler
//
@@ -367,7 +365,7 @@
void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
_missed_ic_stub_refill_verifier = verifier;
}
-#endif
+#endif // ASSERT
private:
@@ -381,12 +379,13 @@
//
NOT_PRODUCT(int _no_safepoint_count;) // If 0, thread allow a safepoint to happen
+ private:
// Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
+ friend class GCLocker;
friend class NoSafepointVerifier;
friend class PauseNoSafepointVerifier;
- friend class GCLocker;
volatile void* _polling_page; // Thread local polling page
@@ -704,7 +703,6 @@
// Support for stack overflow handling, get_thread, etc.
address _stack_base;
size_t _stack_size;
- uintptr_t _self_raw_id; // used by get_thread (mutable)
int _lgrp_id;
volatile void** polling_page_addr() { return &_polling_page; }
@@ -724,9 +722,6 @@
return (_stack_base >= adr && adr >= stack_end());
}
- uintptr_t self_raw_id() { return _self_raw_id; }
- void set_self_raw_id(uintptr_t value) { _self_raw_id = value; }
-
int lgrp_id() const { return _lgrp_id; }
void set_lgrp_id(int value) { _lgrp_id = value; }
@@ -752,14 +747,16 @@
void print_owned_locks() const { print_owned_locks_on(tty); }
Monitor* owned_locks() const { return _owned_locks; }
bool owns_locks() const { return owned_locks() != NULL; }
- bool owns_locks_but_compiled_lock() const;
// Deadlock detection
ResourceMark* current_resource_mark() { return _current_resource_mark; }
void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
-#endif
+#endif // ASSERT
- void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
+ // These functions check conditions on a JavaThread before possibly going to a safepoint,
+ // including NoSafepointVerifier.
+ void check_for_valid_safepoint_state(bool potential_vm_operation) NOT_DEBUG_RETURN;
+ void check_possible_safepoint() NOT_DEBUG_RETURN;
private:
volatile int _jvmti_env_iteration_count;
@@ -1017,8 +1014,6 @@
// Deopt support
DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization
- intptr_t* _must_deopt_id; // id of frame that needs to be deopted once we
- // transition out of native
CompiledMethod* _deopt_nmethod; // CompiledMethod that is currently being deoptimized
vframeArray* _vframe_array_head; // Holds the heap of the active vframeArrays
vframeArray* _vframe_array_last; // Holds last vFrameArray we popped
@@ -1195,17 +1190,6 @@
// failed reallocations.
int _frames_to_pop_failed_realloc;
-#ifndef PRODUCT
- int _jmp_ring_index;
- struct {
- // We use intptr_t instead of address so debugger doesn't try and display strings
- intptr_t _target;
- intptr_t _instruction;
- const char* _file;
- int _line;
- } _jmp_ring[jump_ring_buffer_size];
-#endif // PRODUCT
-
friend class VMThread;
friend class ThreadWaitTransition;
friend class VM_Exit;
@@ -1221,7 +1205,7 @@
#ifdef ASSERT
// verify this JavaThread hasn't be published in the Threads::list yet
void verify_not_published();
-#endif
+#endif // ASSERT
//JNI functiontable getter/setter for JVMTI jni function table interception API.
void set_jni_functions(struct JNINativeInterface_* functionTable) {
@@ -1260,8 +1244,6 @@
oop threadObj() const { return _threadObj; }
void set_threadObj(oop p) { _threadObj = p; }
- ThreadPriority java_priority() const; // Read from threadObj()
-
// Prepare thread and add to priority queue. If a priority is
// not specified, use the priority of the thread object. Threads_lock
// must be held while this function is called.
@@ -1517,10 +1499,6 @@
void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; }
DeoptResourceMark* deopt_mark(void) { return _deopt_mark; }
- intptr_t* must_deopt_id() { return _must_deopt_id; }
- void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; }
- void clear_must_deopt_id() { _must_deopt_id = NULL; }
-
void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; }
CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; }
@@ -1748,16 +1726,8 @@
void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; }
bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; }
-#ifndef PRODUCT
- void record_jump(address target, address instr, const char* file, int line);
-#endif // PRODUCT
-
// For assembly stub generation
static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); }
-#ifndef PRODUCT
- static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index); }
- static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring); }
-#endif // PRODUCT
static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); }
static ByteSize pending_jni_exception_check_fn_offset() {
return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
@@ -1880,9 +1850,7 @@
void print_on(outputStream* st, bool print_extended_info) const;
void print_on(outputStream* st) const { print_on(st, false); }
void print() const;
- void print_value();
void print_thread_state_on(outputStream*) const PRODUCT_RETURN;
- void print_thread_state() const PRODUCT_RETURN;
void print_on_error(outputStream* st, char* buf, int buflen) const;
void print_name_on_error(outputStream* st, char* buf, int buflen) const;
void verify();
@@ -1891,9 +1859,6 @@
// factor out low-level mechanics for use in both normal and error cases
virtual const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
public:
- const char* get_threadgroup_name() const;
- const char* get_parent_name() const;
-
// Accessing frames
frame last_frame() {
_anchor.make_walkable(this);
@@ -1913,7 +1878,6 @@
void trace_stack() PRODUCT_RETURN;
void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
void trace_frames() PRODUCT_RETURN;
- void trace_oops() PRODUCT_RETURN;
// Print an annotated view of the stack frames
void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
@@ -1921,9 +1885,6 @@
print_frame_layout(0, true);
}
- // Returns the number of stack frames on the stack
- int depth() const;
-
// Function for testing deoptimization
void deoptimize();
void make_zombies();
@@ -2067,18 +2028,6 @@
// Machine dependent stuff
#include OS_CPU_HEADER(thread)
- public:
- void set_blocked_on_compilation(bool value) {
- _blocked_on_compilation = value;
- }
-
- bool blocked_on_compilation() {
- return _blocked_on_compilation;
- }
- protected:
- bool _blocked_on_compilation;
-
-
// JSR166 per-thread parker
private:
Parker* _parker;
@@ -2279,13 +2228,6 @@
// This version may be called by sequential or parallel code.
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);
- // Apply "f->do_oop" to roots in all threads that
- // are part of compiled frames
- static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);
-
- static void convert_hcode_pointers();
- static void restore_hcode_pointers();
-
// Sweeper
static void nmethods_do(CodeBlobClosure* cf);
--- a/src/hotspot/share/runtime/vframe.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vframe.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -211,7 +211,6 @@
if (monitor->eliminated() && is_compiled_frame()) { // Eliminated in compiled code
if (monitor->owner_is_scalar_replaced()) {
Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
- // format below for lockbits matches this one.
st->print("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name());
} else {
Handle obj(THREAD, monitor->owner());
@@ -224,7 +223,6 @@
if (monitor->owner() != NULL) {
// the monitor is associated with an object, i.e., it is locked
- markOop mark = NULL;
const char *lock_state = "locked"; // assume we have the monitor locked
if (!found_first_monitor && frame_count == 0) {
// If this is the first frame and we haven't found an owned
@@ -232,18 +230,14 @@
// the lock or if we are blocked trying to acquire it. Only
// an inflated monitor that is first on the monitor list in
// the first frame can block us on a monitor enter.
- mark = monitor->owner()->mark();
- if (mark->has_monitor() &&
+ markWord mark = monitor->owner()->mark();
+ if (mark.has_monitor() &&
( // we have marked ourself as pending on this monitor
- mark->monitor() == thread()->current_pending_monitor() ||
+ mark.monitor() == thread()->current_pending_monitor() ||
// we are not the owner of this monitor
- !mark->monitor()->is_entered(thread())
+ !mark.monitor()->is_entered(thread())
)) {
lock_state = "waiting to lock";
- } else {
- // We own the monitor which is not as interesting so
- // disable the extra printing below.
- mark = NULL;
}
}
print_locked_object_class_name(st, Handle(THREAD, monitor->owner()), lock_state);
--- a/src/hotspot/share/runtime/vframe_hp.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vframe_hp.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -102,8 +102,7 @@
}
void compiledVFrame::update_deferred_value(BasicType type, int index, jvalue value) {
- assert(fr().is_deoptimized_frame() || thread()->must_deopt_id() == fr().id(),
- "frame must be scheduled for deoptimization");
+ assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
jvmtiDeferredLocalVariableSet* locals = NULL;
if (deferred != NULL ) {
--- a/src/hotspot/share/runtime/vmOperations.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vmOperations.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -51,13 +51,10 @@
const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
{ VM_OPS_DO(VM_OP_NAME_INITIALIZE) };
-void VM_Operation::set_calling_thread(Thread* thread, ThreadPriority priority) {
+void VM_Operation::set_calling_thread(Thread* thread) {
_calling_thread = thread;
- assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
- _priority = priority;
}
-
void VM_Operation::evaluate() {
ResourceMark rm;
LogTarget(Debug, vmoperation) lt;
--- a/src/hotspot/share/runtime/vmOperations.hpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vmOperations.hpp Mon Aug 19 21:14:34 2019 -0400
@@ -146,7 +146,6 @@
private:
Thread* _calling_thread;
- ThreadPriority _priority;
long _timestamp;
VM_Operation* _next;
VM_Operation* _prev;
@@ -160,8 +159,7 @@
// VM operation support (used by VM thread)
Thread* calling_thread() const { return _calling_thread; }
- ThreadPriority priority() { return _priority; }
- void set_calling_thread(Thread* thread, ThreadPriority priority);
+ void set_calling_thread(Thread* thread);
long timestamp() const { return _timestamp; }
void set_timestamp(long timestamp) { _timestamp = timestamp; }
--- a/src/hotspot/share/runtime/vmStructs.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vmStructs.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -201,7 +201,7 @@
/* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/******************************************************************/ \
\
- volatile_nonstatic_field(oopDesc, _mark, markOop) \
+ volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
volatile_nonstatic_field(oopDesc, _metadata._compressed_klass, narrowKlass) \
static_field(BarrierSet, _barrier_set, BarrierSet*) \
@@ -264,7 +264,7 @@
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _access_flags, AccessFlags) \
- nonstatic_field(Klass, _prototype_header, markOop) \
+ nonstatic_field(Klass, _prototype_header, markWord) \
volatile_nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _next_link, Klass*) \
nonstatic_field(Klass, _vtable_len, int) \
@@ -592,6 +592,8 @@
static_field(StubRoutines, _aescrypt_decryptBlock, address) \
static_field(StubRoutines, _cipherBlockChaining_encryptAESCrypt, address) \
static_field(StubRoutines, _cipherBlockChaining_decryptAESCrypt, address) \
+ static_field(StubRoutines, _electronicCodeBook_encryptAESCrypt, address) \
+ static_field(StubRoutines, _electronicCodeBook_decryptAESCrypt, address) \
static_field(StubRoutines, _counterMode_AESCrypt, address) \
static_field(StubRoutines, _ghash_processBlocks, address) \
static_field(StubRoutines, _base64_encodeBlock, address) \
@@ -906,14 +908,14 @@
/* Monitors */ \
/************/ \
\
- volatile_nonstatic_field(ObjectMonitor, _header, markOop) \
+ volatile_nonstatic_field(ObjectMonitor, _header, markWord) \
unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \
unchecked_nonstatic_field(ObjectMonitor, _owner, sizeof(void *)) /* NOTE: no type */ \
volatile_nonstatic_field(ObjectMonitor, _contentions, jint) \
volatile_nonstatic_field(ObjectMonitor, _waiters, jint) \
volatile_nonstatic_field(ObjectMonitor, _recursions, intptr_t) \
nonstatic_field(ObjectMonitor, FreeNext, ObjectMonitor*) \
- volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
+ volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \
nonstatic_field(BasicObjectLock, _lock, BasicLock) \
nonstatic_field(BasicObjectLock, _obj, oop) \
static_ptr_volatile_field(ObjectSynchronizer, gBlockList, PaddedObjectMonitor*) \
@@ -1267,7 +1269,6 @@
declare_type(arrayOopDesc, oopDesc) \
declare_type(objArrayOopDesc, arrayOopDesc) \
declare_type(instanceOopDesc, oopDesc) \
- declare_type(markOopDesc, oopDesc) \
\
/**************************************************/ \
/* MetadataOopDesc hierarchy (NOTE: some missing) */ \
@@ -1305,7 +1306,6 @@
/* Oops */ \
/********/ \
\
- declare_oop_type(markOop) \
declare_oop_type(objArrayOop) \
declare_oop_type(oop) \
declare_oop_type(narrowOop) \
@@ -1955,9 +1955,10 @@
declare_toplevel_type(BitMap) \
declare_type(BitMapView, BitMap) \
\
- declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
+ declare_integer_type(markWord) \
+ declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
declare_toplevel_type(address) /* FIXME: should this be an integer type? */\
- declare_integer_type(BasicType) /* FIXME: wrong type (not integer) */\
+ declare_integer_type(BasicType) /* FIXME: wrong type (not integer) */ \
declare_toplevel_type(BreakpointInfo) \
declare_toplevel_type(BreakpointInfo*) \
declare_toplevel_type(CodeBlob*) \
@@ -2630,52 +2631,52 @@
VM_LONG_CONSTANTS_GC(declare_constant) \
\
/*********************/ \
- /* MarkOop constants */ \
+ /* markWord constants */ \
/*********************/ \
\
/* Note: some of these are declared as long constants just for */ \
/* consistency. The mask constants are the only ones requiring */ \
/* 64 bits (on 64-bit platforms). */ \
\
- declare_constant(markOopDesc::age_bits) \
- declare_constant(markOopDesc::lock_bits) \
- declare_constant(markOopDesc::biased_lock_bits) \
- declare_constant(markOopDesc::max_hash_bits) \
- declare_constant(markOopDesc::hash_bits) \
+ declare_constant(markWord::age_bits) \
+ declare_constant(markWord::lock_bits) \
+ declare_constant(markWord::biased_lock_bits) \
+ declare_constant(markWord::max_hash_bits) \
+ declare_constant(markWord::hash_bits) \
\
- declare_constant(markOopDesc::lock_shift) \
- declare_constant(markOopDesc::biased_lock_shift) \
- declare_constant(markOopDesc::age_shift) \
- declare_constant(markOopDesc::hash_shift) \
+ declare_constant(markWord::lock_shift) \
+ declare_constant(markWord::biased_lock_shift) \
+ declare_constant(markWord::age_shift) \
+ declare_constant(markWord::hash_shift) \
\
- declare_constant(markOopDesc::lock_mask) \
- declare_constant(markOopDesc::lock_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_mask) \
- declare_constant(markOopDesc::biased_lock_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_bit_in_place) \
- declare_constant(markOopDesc::age_mask) \
- declare_constant(markOopDesc::age_mask_in_place) \
- declare_constant(markOopDesc::epoch_mask) \
- declare_constant(markOopDesc::epoch_mask_in_place) \
- declare_constant(markOopDesc::hash_mask) \
- declare_constant(markOopDesc::hash_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_alignment) \
+ declare_constant(markWord::lock_mask) \
+ declare_constant(markWord::lock_mask_in_place) \
+ declare_constant(markWord::biased_lock_mask) \
+ declare_constant(markWord::biased_lock_mask_in_place) \
+ declare_constant(markWord::biased_lock_bit_in_place) \
+ declare_constant(markWord::age_mask) \
+ declare_constant(markWord::age_mask_in_place) \
+ declare_constant(markWord::epoch_mask) \
+ declare_constant(markWord::epoch_mask_in_place) \
+ declare_constant(markWord::hash_mask) \
+ declare_constant(markWord::hash_mask_in_place) \
+ declare_constant(markWord::biased_lock_alignment) \
\
- declare_constant(markOopDesc::locked_value) \
- declare_constant(markOopDesc::unlocked_value) \
- declare_constant(markOopDesc::monitor_value) \
- declare_constant(markOopDesc::marked_value) \
- declare_constant(markOopDesc::biased_lock_pattern) \
+ declare_constant(markWord::locked_value) \
+ declare_constant(markWord::unlocked_value) \
+ declare_constant(markWord::monitor_value) \
+ declare_constant(markWord::marked_value) \
+ declare_constant(markWord::biased_lock_pattern) \
\
- declare_constant(markOopDesc::no_hash) \
- declare_constant(markOopDesc::no_hash_in_place) \
- declare_constant(markOopDesc::no_lock_in_place) \
- declare_constant(markOopDesc::max_age) \
+ declare_constant(markWord::no_hash) \
+ declare_constant(markWord::no_hash_in_place) \
+ declare_constant(markWord::no_lock_in_place) \
+ declare_constant(markWord::max_age) \
\
- /* Constants in markOop used by CMS. */ \
- declare_constant(markOopDesc::cms_shift) \
- declare_constant(markOopDesc::cms_mask) \
- declare_constant(markOopDesc::size_shift) \
+ /* Constants in markWord used by CMS. */ \
+ declare_constant(markWord::cms_shift) \
+ declare_constant(markWord::cms_mask) \
+ declare_constant(markWord::size_shift) \
\
/* InvocationCounter constants */ \
declare_constant(InvocationCounter::count_increment) \
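For reference, the low-order bits exported above encode the lock state of a mark word. A small self-contained illustration (the bit patterns match HotSpot's published constants; the helper predicates are simplified and not the VM's exact implementations):

    class MarkWordBitsSketch {
        static final long LOCK_MASK           = 0b011;
        static final long BIASED_LOCK_MASK    = 0b111;
        static final long LOCKED_VALUE        = 0b000;  // stack-locked
        static final long UNLOCKED_VALUE      = 0b001;  // neutral
        static final long MONITOR_VALUE       = 0b010;  // inflated: tagged ObjectMonitor*
        static final long MARKED_VALUE        = 0b011;  // GC forwarding
        static final long BIASED_LOCK_PATTERN = 0b101;

        static boolean hasLocker(long mark)      { return (mark & LOCK_MASK) == LOCKED_VALUE; }
        static boolean hasMonitor(long mark)     { return (mark & LOCK_MASK) == MONITOR_VALUE; }
        static boolean isNeutral(long mark)      { return (mark & BIASED_LOCK_MASK) == UNLOCKED_VALUE; }
        static boolean hasBiasPattern(long mark) { return (mark & BIASED_LOCK_MASK) == BIASED_LOCK_PATTERN; }

        public static void main(String[] args) {
            System.out.println(isNeutral(0x1L));          // true: plain unlocked header
            System.out.println(hasMonitor(0x7f00_0002L)); // true: low bits say "monitor"
            System.out.println(hasBiasPattern(0x5L));     // true: biased-lock pattern
        }
    }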
--- a/src/hotspot/share/runtime/vmThread.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/src/hotspot/share/runtime/vmThread.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -677,7 +677,7 @@
}
// Setup VM_operations for execution
- op->set_calling_thread(t, Thread::get_priority(t));
+ op->set_calling_thread(t);
// It does not make sense to execute the epilogue, if the VM operation object is getting
// deallocated by the VM thread.
@@ -726,7 +726,7 @@
fatal("Nested VM operation %s requested by operation %s",
op->name(), vm_operation()->name());
}
- op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
+ op->set_calling_thread(prev_vm_operation->calling_thread());
}
EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());
--- a/src/java.base/share/classes/com/sun/crypto/provider/ChaCha20Cipher.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/com/sun/crypto/provider/ChaCha20Cipher.java Mon Aug 19 21:14:34 2019 -0400
@@ -33,12 +33,11 @@
import java.nio.ByteOrder;
import java.security.*;
import java.security.spec.AlgorithmParameterSpec;
-import java.util.Arrays;
import java.util.Objects;
+import javax.crypto.*;
import javax.crypto.spec.ChaCha20ParameterSpec;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
-import javax.crypto.*;
import sun.security.util.DerValue;
/**
@@ -134,8 +133,7 @@
/**
* Default constructor.
*/
- protected ChaCha20Cipher() {
- }
+ protected ChaCha20Cipher() { }
/**
* Set the mode of operation. Since this is a stream cipher, there
@@ -185,11 +183,13 @@
}
/**
- * Get the output size based on an input length. In simple stream-cipher
+ * Get the output size required to hold the result of the next update or
+ * doFinal operation. In simple stream-cipher
* mode, the output size will equal the input size. For ChaCha20-Poly1305
* for encryption the output size will be the sum of the input length
- * and tag length. For decryption, the output size will be the input
- * length less the tag length or zero, whichever is larger.
+ * and tag length. For decryption, the output size will be the input
+ * length plus any previously unprocessed data minus the tag
+ * length, minimum zero.
*
* @param inputLen the length in bytes of the input
*
@@ -197,17 +197,7 @@
*/
@Override
protected int engineGetOutputSize(int inputLen) {
- int outLen = 0;
-
- if (mode == MODE_NONE) {
- outLen = inputLen;
- } else if (mode == MODE_AEAD) {
- outLen = (direction == Cipher.ENCRYPT_MODE) ?
- Math.addExact(inputLen, TAG_LENGTH) :
- Integer.max(inputLen - TAG_LENGTH, 0);
- }
-
- return outLen;
+ return engine.getOutputSize(inputLen, true);
}
/**
@@ -237,13 +227,10 @@
AlgorithmParameters params = null;
if (mode == MODE_AEAD) {
try {
- // Force the 12-byte nonce into a DER-encoded OCTET_STRING
- byte[] derNonce = new byte[nonce.length + 2];
- derNonce[0] = 0x04; // OCTET_STRING tag
- derNonce[1] = (byte)nonce.length; // 12-byte length;
- System.arraycopy(nonce, 0, derNonce, 2, nonce.length);
+ // Place the 12-byte nonce into a DER-encoded OCTET_STRING
params = AlgorithmParameters.getInstance("ChaCha20-Poly1305");
- params.init(derNonce);
+ params.init((new DerValue(
+ DerValue.tag_OctetString, nonce).toByteArray()));
} catch (NoSuchAlgorithmException | IOException exc) {
throw new RuntimeException(exc);
}
@@ -638,7 +625,7 @@
*/
@Override
protected byte[] engineUpdate(byte[] in, int inOfs, int inLen) {
- byte[] out = new byte[inLen];
+ byte[] out = new byte[engine.getOutputSize(inLen, false)];
try {
engine.doUpdate(in, inOfs, inLen, out, 0);
} catch (ShortBufferException | KeyException exc) {
@@ -696,7 +683,7 @@
@Override
protected byte[] engineDoFinal(byte[] in, int inOfs, int inLen)
throws AEADBadTagException {
- byte[] output = new byte[engineGetOutputSize(inLen)];
+ byte[] output = new byte[engine.getOutputSize(inLen, true)];
try {
engine.doFinal(in, inOfs, inLen, output, 0);
} catch (ShortBufferException | KeyException exc) {
@@ -1158,6 +1145,17 @@
*/
interface ChaChaEngine {
/**
+ * Size an output buffer based on the input and where applicable
+ * the current state of the engine in a multipart operation.
+ *
+ * @param inLength the input length.
+ * @param isFinal true if this is invoked from a doFinal call.
+ *
+ * @return the recommended size for the output buffer.
+ */
+ int getOutputSize(int inLength, boolean isFinal);
+
+ /**
* Perform a multi-part update for ChaCha20.
*
* @param in the input data.
@@ -1201,6 +1199,12 @@
private EngineStreamOnly () { }
@Override
+ public int getOutputSize(int inLength, boolean isFinal) {
+ // The isFinal parameter is not relevant in this kind of engine
+ return inLength;
+ }
+
+ @Override
public int doUpdate(byte[] in, int inOff, int inLen, byte[] out,
int outOff) throws ShortBufferException, KeyException {
if (initialized) {
@@ -1234,6 +1238,11 @@
private final class EngineAEADEnc implements ChaChaEngine {
+ @Override
+ public int getOutputSize(int inLength, boolean isFinal) {
+ return (isFinal ? Math.addExact(inLength, TAG_LENGTH) : inLength);
+ }
+
private EngineAEADEnc() throws InvalidKeyException {
initAuthenticator();
counter = 1;
@@ -1294,6 +1303,18 @@
private final ByteArrayOutputStream cipherBuf;
private final byte[] tag;
+ @Override
+ public int getOutputSize(int inLen, boolean isFinal) {
+ // If we are performing a decrypt-update we should always return
+ // zero length since we cannot return any data until the tag has
+ // been consumed and verified. CipherSpi.engineGetOutputSize will
+ // always set isFinal to true to get the required output buffer
+ // size.
+ return (isFinal ?
+ Integer.max(Math.addExact((inLen - TAG_LENGTH),
+ cipherBuf.size()), 0) : 0);
+ }
+
private EngineAEADDec() throws InvalidKeyException {
initAuthenticator();
counter = 1;
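The new getOutputSize contract is easiest to see with concrete numbers: encryption only grows by the 16-byte tag on doFinal, and decryption releases nothing until doFinal, when buffered data minus the tag becomes available. A standalone sketch of that arithmetic (not the JDK implementation):

    class ChaChaOutputSizeSketch {
        static final int TAG_LENGTH = 16;

        static int aeadEncryptSize(int inLen, boolean isFinal) {
            return isFinal ? Math.addExact(inLen, TAG_LENGTH) : inLen;
        }

        static int aeadDecryptSize(int inLen, int buffered, boolean isFinal) {
            // Updates release nothing; doFinal returns everything minus the tag.
            return isFinal ? Integer.max(Math.addExact(inLen - TAG_LENGTH, buffered), 0) : 0;
        }

        public static void main(String[] args) {
            System.out.println(aeadEncryptSize(100, true));     // 116
            System.out.println(aeadEncryptSize(100, false));    // 100
            System.out.println(aeadDecryptSize(116, 0, true));  // 100
            System.out.println(aeadDecryptSize(8, 0, false));   // 0
        }
    }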
--- a/src/java.base/share/classes/com/sun/crypto/provider/ElectronicCodeBook.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/com/sun/crypto/provider/ElectronicCodeBook.java Mon Aug 19 21:14:34 2019 -0400
@@ -28,6 +28,8 @@
import java.security.InvalidKeyException;
import java.security.ProviderException;
import sun.security.util.ArrayUtil;
+import java.util.Objects;
+import jdk.internal.HotSpotIntrinsicCandidate;
/**
* This class represents ciphers in electronic codebook (ECB) mode.
@@ -95,6 +97,16 @@
embeddedCipher.init(decrypting, algorithm, key);
}
+ @HotSpotIntrinsicCandidate
+ private int implECBEncrypt(byte [] in, int inOff, int len, byte[] out, int outOff) {
+ for (int i = len; i >= blockSize; i -= blockSize) {
+ embeddedCipher.encryptBlock(in, inOff, out, outOff);
+ inOff += blockSize;
+ outOff += blockSize;
+ }
+ return len;
+ }
+
/**
* Performs encryption operation.
*
@@ -116,9 +128,13 @@
ArrayUtil.blockSizeCheck(len, blockSize);
ArrayUtil.nullAndBoundsCheck(in, inOff, len);
ArrayUtil.nullAndBoundsCheck(out, outOff, len);
+ return implECBEncrypt(in, inOff, len, out, outOff);
+ }
+ @HotSpotIntrinsicCandidate
+ private int implECBDecrypt(byte [] in, int inOff, int len, byte[] out, int outOff) {
for (int i = len; i >= blockSize; i -= blockSize) {
- embeddedCipher.encryptBlock(in, inOff, out, outOff);
+ embeddedCipher.decryptBlock(in, inOff, out, outOff);
inOff += blockSize;
outOff += blockSize;
}
@@ -146,12 +162,6 @@
ArrayUtil.blockSizeCheck(len, blockSize);
ArrayUtil.nullAndBoundsCheck(in, inOff, len);
ArrayUtil.nullAndBoundsCheck(out, outOff, len);
-
- for (int i = len; i >= blockSize; i -= blockSize) {
- embeddedCipher.decryptBlock(in, inOff, out, outOff);
- inOff += blockSize;
- outOff += blockSize;
- }
- return len;
- }
+ return implECBDecrypt(in, inOff, len, out, outOff);
+ }
}
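The refactoring above follows a common intrinsic-enabling pattern: validation stays in the public entry point while the tight per-block loop moves into a small private method annotated @HotSpotIntrinsicCandidate so the JIT can replace it wholesale. A minimal sketch of that split (placeholder block operation, not the JDK cipher):

    class EcbSplitSketch {
        static final int BLOCK_SIZE = 16;

        static int encrypt(byte[] in, int inOff, int len, byte[] out, int outOff) {
            if (len % BLOCK_SIZE != 0) {
                throw new IllegalArgumentException("length not a multiple of the block size");
            }
            java.util.Objects.checkFromIndexSize(inOff, len, in.length);
            java.util.Objects.checkFromIndexSize(outOff, len, out.length);
            return implEncrypt(in, inOff, len, out, outOff);     // intrinsic candidate in the JDK
        }

        private static int implEncrypt(byte[] in, int inOff, int len, byte[] out, int outOff) {
            for (int i = len; i >= BLOCK_SIZE; i -= BLOCK_SIZE) {
                encryptBlock(in, inOff, out, outOff);
                inOff += BLOCK_SIZE;
                outOff += BLOCK_SIZE;
            }
            return len;
        }

        // Placeholder "cipher": a real implementation would apply the AES block function.
        private static void encryptBlock(byte[] in, int inOff, byte[] out, int outOff) {
            System.arraycopy(in, inOff, out, outOff, BLOCK_SIZE);
        }
    }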
--- a/src/java.base/share/classes/java/lang/StackFrameInfo.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/java/lang/StackFrameInfo.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,23 +31,23 @@
import java.lang.invoke.MethodType;
class StackFrameInfo implements StackFrame {
- private final byte RETAIN_CLASS_REF = 0x01;
-
private final static JavaLangInvokeAccess JLIA =
SharedSecrets.getJavaLangInvokeAccess();
- private final byte flags;
- private final Object memberName;
- private final short bci;
+ private final boolean retainClassRef;
+ private final Object memberName; // MemberName initialized by VM
+ private int bci; // initialized by VM to >= 0
private volatile StackTraceElement ste;
/*
- * Create StackFrameInfo for StackFrameTraverser and LiveStackFrameTraverser
- * to use
+ * Construct an empty StackFrameInfo object that will be filled by the VM
+ * during stack walking.
+ *
+ * @see StackStreamFactory.AbstractStackWalker#callStackWalk
+ * @see StackStreamFactory.AbstractStackWalker#fetchStackFrames
*/
StackFrameInfo(StackWalker walker) {
- this.flags = walker.retainClassRef ? RETAIN_CLASS_REF : 0;
- this.bci = -1;
+ this.retainClassRef = walker.retainClassRef;
this.memberName = JLIA.newMemberName();
}
@@ -135,7 +135,7 @@
}
private void ensureRetainClassRefEnabled() {
- if ((flags & RETAIN_CLASS_REF) == 0) {
+ if (!retainClassRef) {
throw new UnsupportedOperationException("No access to RETAIN_CLASS_REFERENCE");
}
}
--- a/src/java.base/share/classes/java/lang/String.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/java/lang/String.java Mon Aug 19 21:14:34 2019 -0400
@@ -1868,18 +1868,7 @@
* length of this {@code String} object.
*/
public String substring(int beginIndex) {
- if (beginIndex < 0) {
- throw new StringIndexOutOfBoundsException(beginIndex);
- }
- int subLen = length() - beginIndex;
- if (subLen < 0) {
- throw new StringIndexOutOfBoundsException(subLen);
- }
- if (beginIndex == 0) {
- return this;
- }
- return isLatin1() ? StringLatin1.newString(value, beginIndex, subLen)
- : StringUTF16.newString(value, beginIndex, subLen);
+ return substring(beginIndex, length());
}
/**
@@ -3677,7 +3666,7 @@
static void checkIndex(int index, int length) {
if (index < 0 || index >= length) {
throw new StringIndexOutOfBoundsException("index " + index +
- ",length " + length);
+ ", length " + length);
}
}
@@ -3688,7 +3677,7 @@
static void checkOffset(int offset, int length) {
if (offset < 0 || offset > length) {
throw new StringIndexOutOfBoundsException("offset " + offset +
- ",length " + length);
+ ", length " + length);
}
}
--- a/src/java.base/share/classes/java/lang/StringUTF16.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/java/lang/StringUTF16.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -489,7 +489,7 @@
final char lo = Character.lowSurrogate(ch);
checkBoundsBeginEnd(fromIndex, max, value);
for (int i = fromIndex; i < max - 1; i++) {
- if (getChar(value, i) == hi && getChar(value, i + 1 ) == lo) {
+ if (getChar(value, i) == hi && getChar(value, i + 1) == lo) {
return i;
}
}
--- a/src/java.base/share/classes/java/lang/System.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/java/lang/System.java Mon Aug 19 21:14:34 2019 -0400
@@ -74,6 +74,7 @@
import jdk.internal.logger.LocalizedLoggerWrapper;
import jdk.internal.util.SystemProps;
import jdk.internal.vm.annotation.Stable;
+import sun.nio.fs.DefaultFileSystemProvider;
import sun.reflect.annotation.AnnotationType;
import sun.nio.ch.Interruptible;
import sun.security.util.SecurityConstants;
@@ -339,6 +340,8 @@
if (security == null) {
// ensure image reader is initialized
Object.class.getResource("java/lang/ANY");
+ // ensure the default file system is initialized
+ DefaultFileSystemProvider.theFileSystem();
}
if (sm != null) {
try {
--- a/src/java.base/share/classes/java/security/CodeSource.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/java/security/CodeSource.java Mon Aug 19 21:14:34 2019 -0400
@@ -57,7 +57,7 @@
*
* @serial
*/
- private URL location;
+ private final URL location;
/*
* The code signers.
--- a/src/java.base/share/classes/jdk/internal/loader/BuiltinClassLoader.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/jdk/internal/loader/BuiltinClassLoader.java Mon Aug 19 21:14:34 2019 -0400
@@ -25,8 +25,6 @@
package jdk.internal.loader;
-import java.io.File;
-import java.io.FilePermission;
import java.io.IOException;
import java.io.InputStream;
import java.lang.module.ModuleDescriptor;
@@ -40,7 +38,6 @@
import java.security.AccessController;
import java.security.CodeSigner;
import java.security.CodeSource;
-import java.security.Permission;
import java.security.PermissionCollection;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
@@ -65,6 +62,7 @@
import jdk.internal.module.ModulePatcher.PatchedModuleReader;
import jdk.internal.module.Resources;
import jdk.internal.vm.annotation.Stable;
+import sun.security.util.LazyCodeSourcePermissionCollection;
/**
@@ -966,39 +964,9 @@
*/
@Override
protected PermissionCollection getPermissions(CodeSource cs) {
- PermissionCollection perms = super.getPermissions(cs);
-
- // add the permission to access the resource
- URL url = cs.getLocation();
- if (url == null)
- return perms;
-
- // avoid opening connection when URL is to resource in run-time image
- if (url.getProtocol().equals("jrt")) {
- perms.add(new RuntimePermission("accessSystemModules"));
- return perms;
- }
-
- // open connection to determine the permission needed
- try {
- Permission p = url.openConnection().getPermission();
- if (p != null) {
- // for directories then need recursive access
- if (p instanceof FilePermission) {
- String path = p.getName();
- if (path.endsWith(File.separator)) {
- path += "-";
- p = new FilePermission(path, "read");
- }
- }
- perms.add(p);
- }
- } catch (IOException ioe) { }
-
- return perms;
+ return new LazyCodeSourcePermissionCollection(super.getPermissions(cs), cs);
}
-
// -- miscellaneous supporting methods
/**
--- a/src/java.base/share/classes/sun/net/www/protocol/http/AuthenticationInfo.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/sun/net/www/protocol/http/AuthenticationInfo.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
import java.net.URL;
import java.util.HashMap;
import java.util.Objects;
+import java.util.function.Function;
import sun.net.www.HeaderParser;
@@ -125,25 +126,42 @@
*/
private static HashMap<String,Thread> requests = new HashMap<>();
- /* check if a request for this destination is in progress
- * return false immediately if not. Otherwise block until
- * request is finished and return true
+ /*
+ * Check if an AuthenticationInfo is available in the cache.
+ * If not, check whether a request for this destination is in progress and,
+ * if so, block until the other request has finished authenticating, then
+ * return the cached authentication value.
+ * Otherwise, return the cached authentication value, which may be null.
*/
- private static boolean requestIsInProgress (String key) {
- if (!serializeAuth) {
- /* behavior is disabled. Revert to concurrent requests */
- return false;
+ private static AuthenticationInfo requestAuthentication(String key, Function<String, AuthenticationInfo> cache) {
+ AuthenticationInfo cached = cache.apply(key);
+ if (cached != null || !serializeAuth) {
+ // either we already have a value in the cache, and we can
+ // use that immediately, or the serializeAuth behavior is disabled,
+ // and we can revert to concurrent requests
+ return cached;
}
synchronized (requests) {
+ // check again after synchronizing, and if available
+ // just return the cached value.
+ cached = cache.apply(key);
+ if (cached != null) return cached;
+
+ // Otherwise, if no request is in progress, record this
+ // thread as performing authentication and return null.
Thread t, c;
c = Thread.currentThread();
if ((t = requests.get(key)) == null) {
requests.put (key, c);
- return false;
+ assert cached == null;
+ return cached;
}
if (t == c) {
- return false;
+ assert cached == null;
+ return cached;
}
+ // Otherwise, another thread is currently performing authentication:
+ // wait until it finishes.
while (requests.containsKey(key)) {
try {
requests.wait ();
@@ -151,7 +169,7 @@
}
}
/* entry may be in cache now. */
- return true;
+ return cache.apply(key);
}
/* signal completion of an authentication (whether it succeeded or not)
@@ -318,13 +336,13 @@
return key;
}
+ private static AuthenticationInfo getCachedServerAuth(String key) {
+ return getAuth(key, null);
+ }
+
static AuthenticationInfo getServerAuth(String key) {
- AuthenticationInfo cached = getAuth(key, null);
- if ((cached == null) && requestIsInProgress (key)) {
- /* check the cache again, it might contain an entry */
- cached = getAuth(key, null);
- }
- return cached;
+ if (!serializeAuth) return getCachedServerAuth(key);
+ return requestAuthentication(key, AuthenticationInfo::getCachedServerAuth);
}
@@ -367,13 +385,13 @@
return key;
}
+ private static AuthenticationInfo getCachedProxyAuth(String key) {
+ return (AuthenticationInfo) cache.get(key, null);
+ }
+
static AuthenticationInfo getProxyAuth(String key) {
- AuthenticationInfo cached = (AuthenticationInfo) cache.get(key, null);
- if ((cached == null) && requestIsInProgress (key)) {
- /* check the cache again, it might contain an entry */
- cached = (AuthenticationInfo) cache.get(key, null);
- }
- return cached;
+ if (!serializeAuth) return getCachedProxyAuth(key);
+ return requestAuthentication(key, AuthenticationInfo::getCachedProxyAuth);
}
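
The AuthenticationInfo rework above replaces the boolean requestIsInProgress with requestAuthentication, which takes the cache lookup as a Function so the post-wait re-check happens inside the same method and under the same lock discipline. A minimal sketch of that serialize-by-key pattern in isolation, with hypothetical names (KeyedSerializer, lookup, finish); it is not the JDK implementation:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class KeyedSerializer<V> {
    private final Map<String, Thread> inProgress = new HashMap<>();

    public V request(String key, Function<String, V> lookup) {
        V cached = lookup.apply(key);
        if (cached != null) {
            return cached;                      // fast path: already cached
        }
        synchronized (inProgress) {
            cached = lookup.apply(key);         // re-check under the lock
            if (cached != null) {
                return cached;
            }
            Thread current = Thread.currentThread();
            Thread owner = inProgress.get(key);
            if (owner == null) {
                inProgress.put(key, current);   // this thread owns the slow path
                return null;
            }
            if (owner == current) {
                return null;                    // re-entrant call from the owner
            }
            while (inProgress.containsKey(key)) {
                try {
                    inProgress.wait();          // wait for the owner to finish
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            return lookup.apply(key);           // entry may be in the cache now
        }
    }

    public void finish(String key) {
        synchronized (inProgress) {
            inProgress.remove(key);
            inProgress.notifyAll();
        }
    }
}
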
--- a/src/java.base/share/classes/sun/security/ssl/DTLSInputRecord.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/sun/security/ssl/DTLSInputRecord.java Mon Aug 19 21:14:34 2019 -0400
@@ -359,7 +359,19 @@
return null;
}
+ // Fail fast for unknown handshake message.
byte handshakeType = plaintextFragment.get(); // pos: 0
+ if (!SSLHandshake.isKnown(handshakeType)) {
+ if (SSLLogger.isOn && SSLLogger.isOn("ssl")) {
+ SSLLogger.fine("Discard invalid record: " +
+ "unknown handshake type size, Handshake.msg_type = " +
+ (handshakeType & 0xFF));
+ }
+
+ // invalid, discard this record [section 4.1.2.7, RFC 6347]
+ return null;
+ }
+
int messageLength =
((plaintextFragment.get() & 0xFF) << 16) |
((plaintextFragment.get() & 0xFF) << 8) |
--- a/src/java.base/share/classes/sun/security/ssl/SSLEngineInputRecord.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/sun/security/ssl/SSLEngineInputRecord.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -287,8 +287,15 @@
}
handshakeFrag.mark();
- // skip the first byte: handshake type
+
+ // Fail fast for unknown handshake message.
byte handshakeType = handshakeFrag.get();
+ if (!SSLHandshake.isKnown(handshakeType)) {
+ throw new SSLProtocolException(
+ "Unknown handshake type size, Handshake.msg_type = " +
+ (handshakeType & 0xFF));
+ }
+
int handshakeBodyLen = Record.getInt24(handshakeFrag);
handshakeFrag.reset();
int handshakeMessageLen =
--- a/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/sun/security/ssl/SSLHandshake.java Mon Aug 19 21:14:34 2019 -0400
@@ -497,6 +497,16 @@
return "UNKNOWN-HANDSHAKE-MESSAGE(" + id + ")";
}
+ static boolean isKnown(byte id) {
+ for (SSLHandshake hs : SSLHandshake.values()) {
+ if (hs.id == id && id != NOT_APPLICABLE.id) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
static final void kickstart(HandshakeContext context) throws IOException {
if (context instanceof ClientHandshakeContext) {
// For initial handshaking, including session resumption,
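
SSLHandshake.isKnown gives the three record readers a shared fail-fast check: the one-byte handshake type is validated against the known enum ids (excluding NOT_APPLICABLE) before the 24-bit length that follows it is trusted. A hedged illustration of the same shape, not JDK code:

import java.nio.ByteBuffer;
import java.util.function.IntPredicate;

class HandshakeSanity {
    static int readValidatedLength(ByteBuffer fragment, IntPredicate isKnownType) {
        byte type = fragment.get();
        if (!isKnownType.test(type & 0xFF)) {
            throw new IllegalArgumentException(
                "Unknown handshake type, msg_type = " + (type & 0xFF));
        }
        // 24-bit big-endian body length, as read by Record.getInt24
        return ((fragment.get() & 0xFF) << 16) |
               ((fragment.get() & 0xFF) << 8) |
                (fragment.get() & 0xFF);
    }
}
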
--- a/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/classes/sun/security/ssl/SSLSocketInputRecord.java Mon Aug 19 21:14:34 2019 -0400
@@ -302,8 +302,15 @@
}
handshakeFrag.mark();
- // skip the first byte: handshake type
+
+ // Fail fast for unknown handshake message.
byte handshakeType = handshakeFrag.get();
+ if (!SSLHandshake.isKnown(handshakeType)) {
+ throw new SSLProtocolException(
+ "Unknown handshake type size, Handshake.msg_type = " +
+ (handshakeType & 0xFF));
+ }
+
int handshakeBodyLen = Record.getInt24(handshakeFrag);
handshakeFrag.reset();
int handshakeMessageLen =
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/classes/sun/security/util/LazyCodeSourcePermissionCollection.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.security.util;
+
+import java.io.File;
+import java.io.FilePermission;
+import java.io.IOException;
+import java.net.URL;
+import java.security.CodeSource;
+import java.security.Permission;
+import java.security.PermissionCollection;
+import java.util.Enumeration;
+
+/**
+ * This {@code PermissionCollection} implementation delegates to another
+ * {@code PermissionCollection}, taking care to lazily add the permission needed
+ * to read from the given {@code CodeSource} at first use, i.e., when either of
+ * {@link #elements}, {@link #implies} or {@link #toString} is called, or when
+ * the collection is serialized.
+ */
+public final class LazyCodeSourcePermissionCollection
+ extends PermissionCollection
+{
+ private static final long serialVersionUID = -6727011328946861783L;
+ private final PermissionCollection perms;
+ private final CodeSource cs;
+ private volatile boolean permissionAdded;
+
+ public LazyCodeSourcePermissionCollection(PermissionCollection perms,
+ CodeSource cs) {
+ this.perms = perms;
+ this.cs = cs;
+ }
+
+ private void ensureAdded() {
+ if (!permissionAdded) {
+ synchronized(perms) {
+ if (permissionAdded)
+ return;
+
+ // open connection to determine the permission needed
+ URL location = cs.getLocation();
+ if (location != null) {
+ try {
+ Permission p = location.openConnection().getPermission();
+ if (p != null) {
+ // for directories we need recursive access
+ if (p instanceof FilePermission) {
+ String path = p.getName();
+ if (path.endsWith(File.separator)) {
+ path += "-";
+ p = new FilePermission(path,
+ SecurityConstants.FILE_READ_ACTION);
+ }
+ }
+ perms.add(p);
+ }
+ } catch (IOException ioe) {
+ }
+ }
+ if (isReadOnly()) {
+ perms.setReadOnly();
+ }
+ permissionAdded = true;
+ }
+ }
+ }
+
+ @Override
+ public void add(Permission permission) {
+ if (isReadOnly())
+ throw new SecurityException(
+ "attempt to add a Permission to a readonly PermissionCollection");
+ perms.add(permission);
+ }
+
+ @Override
+ public boolean implies(Permission permission) {
+ ensureAdded();
+ return perms.implies(permission);
+ }
+
+ @Override
+ public Enumeration<Permission> elements() {
+ ensureAdded();
+ return perms.elements();
+ }
+
+ @Override
+ public String toString() {
+ ensureAdded();
+ return perms.toString();
+ }
+
+ /**
+ * On serialization, initialize and replace with the underlying
+ * permissions. This removes the laziness on deserialization.
+ */
+ private Object writeReplace() {
+ ensureAdded();
+ return perms;
+ }
+}
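
A rough usage sketch for the new LazyCodeSourcePermissionCollection, assuming a file: code source. The class lives in the internal sun.security.util package, so a standalone demo like this would need --add-exports; it is shown only to illustrate when the lazy URLConnection permission probe fires:

import java.net.URL;
import java.security.CodeSource;
import java.security.PermissionCollection;
import java.security.Permissions;
import java.security.cert.Certificate;
import sun.security.util.LazyCodeSourcePermissionCollection;

public class LazyPermsDemo {
    public static void main(String[] args) throws Exception {
        CodeSource cs = new CodeSource(new URL("file:/tmp/app/"), (Certificate[]) null);
        PermissionCollection base = new Permissions();          // whatever the policy granted
        PermissionCollection perms =
            new LazyCodeSourcePermissionCollection(base, cs);   // no I/O happens here

        // The first implies/elements/toString call triggers the lazy addition of
        // the read permission derived from the code source location (on Unix-like
        // systems, FilePermission("/tmp/app/-", "read") for this URL).
        System.out.println(perms.implies(
            new java.io.FilePermission("/tmp/app/lib.jar", "read")));
    }
}
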
--- a/src/java.base/share/conf/security/java.security Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/share/conf/security/java.security Mon Aug 19 21:14:34 2019 -0400
@@ -1170,8 +1170,9 @@
# Disabled mechanisms for the Simple Authentication and Security Layer (SASL)
#
# Disabled mechanisms will not be negotiated by both SASL clients and servers.
-# These mechanisms will be ignored if they are specified in the mechanisms argument
-# of `Sasl.createClient` or the mechanism argument of `Sasl.createServer`.
+# These mechanisms will be ignored if they are specified in the "mechanisms"
+# argument of "Sasl.createSaslClient" or the "mechanism" argument of
+# "Sasl.createSaslServer".
#
# The value of this property is a comma-separated list of SASL mechanisms.
# The mechanisms are case-sensitive. Whitespaces around the commas are ignored.
--- a/src/java.base/windows/classes/sun/net/www/protocol/http/ntlm/NTLMAuthentication.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/windows/classes/sun/net/www/protocol/http/ntlm/NTLMAuthentication.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -202,7 +202,17 @@
}
}
- static native boolean isTrustedSite(String url);
+ private static final boolean isTrustedSiteAvailable = isTrustedSiteAvailable();
+
+ private static native boolean isTrustedSiteAvailable();
+
+ private static boolean isTrustedSite(String url) {
+ if (isTrustedSiteAvailable)
+ return isTrustedSite0(url);
+ return false;
+ }
+
+ private static native boolean isTrustedSite0(String url);
/**
* Not supported. Must use the setHeaders() method
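
The NTLM change probes once, during class initialization, whether urlmon.dll and CoInternetCreateSecurityManager can be resolved, and isTrustedSite then degrades to false instead of failing later. A hedged sketch of that availability-guard pattern with hypothetical names (NativeFeature, probe0, call0):

final class NativeFeature {
    private static final boolean AVAILABLE = probe0();   // probed once per class load

    static boolean isSupported(String input) {
        if (!AVAILABLE) {
            return false;            // graceful fallback when the library is missing
        }
        return call0(input);
    }

    private static native boolean probe0();              // resolves the optional library
    private static native boolean call0(String input);   // only called if probe0 succeeded
}
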
--- a/src/java.base/windows/native/libnet/NTLMAuthentication.c Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.base/windows/native/libnet/NTLMAuthentication.c Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,44 @@
#include <jni.h>
#include <windows.h>
#include "jni_util.h"
+#include "jdk_util.h"
#include <urlmon.h>
-JNIEXPORT jboolean JNICALL Java_sun_net_www_protocol_http_ntlm_NTLMAuthentication_isTrustedSite(JNIEnv *env, jclass clazz, jstring url )
+typedef HRESULT (WINAPI *CoInternetCreateSecurityManagerType)
+ (IServiceProvider*,IInternetSecurityManager**,DWORD);
+
+static CoInternetCreateSecurityManagerType fn_CoInternetCreateSecurityManager;
+
+JNIEXPORT jboolean JNICALL
+Java_sun_net_www_protocol_http_ntlm_NTLMAuthentication_isTrustedSiteAvailable
+ (JNIEnv *env, jclass clazz)
{
+ HMODULE libUrlmon = JDK_LoadSystemLibrary("urlmon.dll");
+ if (libUrlmon != NULL) {
+ fn_CoInternetCreateSecurityManager = (CoInternetCreateSecurityManagerType)
+ GetProcAddress(libUrlmon, "CoInternetCreateSecurityManager");
+ if (fn_CoInternetCreateSecurityManager != NULL) {
+ return JNI_TRUE;
+ }
+ }
+ return JNI_FALSE;
+}
+JNIEXPORT jboolean JNICALL
+Java_sun_net_www_protocol_http_ntlm_NTLMAuthentication_isTrustedSite0
+ (JNIEnv *env, jclass clazz, jstring url)
+{
HRESULT hr;
DWORD dwZone;
DWORD pPolicy = 0;
IInternetSecurityManager *spSecurityManager;
jboolean ret;
+ if (fn_CoInternetCreateSecurityManager == NULL)
+ return JNI_FALSE;
+
// Create IInternetSecurityManager
- hr = CoInternetCreateSecurityManager(NULL, &spSecurityManager, (DWORD)0);
+ hr = fn_CoInternetCreateSecurityManager(NULL, &spSecurityManager, (DWORD)0);
if (FAILED(hr)) {
return JNI_FALSE;
}
--- a/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/SyncProvider.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/SyncProvider.java Mon Aug 19 21:14:34 2019 -0400
@@ -90,13 +90,6 @@
* com.sun.rowset.providers.RIOptimisticProvider
* </pre>
* <p>
- * A vendor can register a <code>SyncProvider</code> implementation class name
- * with Oracle Corporation by sending email to jdbc@sun.com.
- * Oracle will maintain a database listing the
- * available <code>SyncProvider</code> implementations for use with compliant
- * <code>RowSet</code> implementations. This database will be similar to the
- * one already maintained to list available JDBC drivers.
- * <P>
* Vendors should refer to the reference implementation synchronization
* providers for additional guidance on how to implement a new
* <code>SyncProvider</code> implementation.
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java Mon Aug 19 21:14:34 2019 -0400
@@ -203,6 +203,8 @@
{"StubRoutines::_aescrypt_decryptBlock", "_aot_stub_routines_aescrypt_decryptBlock"},
{"StubRoutines::_cipherBlockChaining_encryptAESCrypt", "_aot_stub_routines_cipherBlockChaining_encryptAESCrypt"},
{"StubRoutines::_cipherBlockChaining_decryptAESCrypt", "_aot_stub_routines_cipherBlockChaining_decryptAESCrypt"},
+ {"StubRoutines::_electronicCodeBook_encryptAESCrypt", "_aot_stub_routines_electronicCodeBook_encryptAESCrypt"},
+ {"StubRoutines::_electronicCodeBook_decryptAESCrypt", "_aot_stub_routines_electronicCodeBook_decryptAESCrypt"},
{"StubRoutines::_updateBytesCRC32", "_aot_stub_routines_update_bytes_crc32"},
{"StubRoutines::_crc_table_adr", "_aot_stub_routines_crc_table_adr"},
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Mon Aug 19 21:14:34 2019 -0400
@@ -655,14 +655,7 @@
}
if (isOopType) {
- // HACK: turn markOop into a C integer type. This allows
- // proper handling of it in the Serviceability Agent. (FIXME
- // -- consider doing something different here)
- if (typeName.equals("markOop")) {
- type = new BasicCIntegerType(this, typeName, true);
- } else {
- type.setIsOopType(true);
- }
+ type.setIsOopType(true);
}
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java Mon Aug 19 21:14:34 2019 -0400
@@ -26,6 +26,11 @@
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
import sun.jvm.hotspot.tools.JStack;
import sun.jvm.hotspot.tools.JMap;
@@ -132,44 +137,51 @@
}
private static boolean toolHelp(String toolName) {
- if (toolName.equals("jstack")) {
- return jstackHelp();
- }
- if (toolName.equals("jinfo")) {
- return jinfoHelp();
- }
- if (toolName.equals("jmap")) {
- return jmapHelp();
+ switch (toolName) {
+ case "jstack":
+ return jstackHelp();
+ case "jinfo":
+ return jinfoHelp();
+ case "jmap":
+ return jmapHelp();
+ case "jsnap":
+ return jsnapHelp();
+ case "debugd":
+ return debugdHelp();
+ case "hsdb":
+ case "clhsdb":
+ return commonHelp(toolName);
+ default:
+ return launcherHelp();
}
- if (toolName.equals("jsnap")) {
- return jsnapHelp();
- }
- if (toolName.equals("debugd")) {
- return debugdHelp();
- }
- if (toolName.equals("hsdb")) {
- return commonHelp("hsdb");
- }
- if (toolName.equals("clhsdb")) {
- return commonHelp("clhsdb");
- }
- return launcherHelp();
}
private static final String NO_REMOTE = null;
- private static void buildAttachArgs(ArrayList<String> newArgs, String pid,
- String exe, String core, String remote, boolean allowEmpty) {
- if (!allowEmpty && (pid == null) && (exe == null) && (remote == NO_REMOTE)) {
+ private static String[] buildAttachArgs(Map<String, String> newArgMap,
+ boolean allowEmpty) {
+ String pid = newArgMap.remove("pid");
+ String exe = newArgMap.remove("exe");
+ String core = newArgMap.remove("core");
+ String connect = newArgMap.remove("connect");
+ if (!allowEmpty && (pid == null) && (exe == null) && (connect == NO_REMOTE)) {
throw new SAGetoptException("You have to set --pid or --exe or --connect.");
}
+ List<String> newArgs = new ArrayList<>();
+ for (var entry : newArgMap.entrySet()) {
+ newArgs.add(entry.getKey());
+ if (entry.getValue() != null) {
+ newArgs.add(entry.getValue());
+ }
+ }
+
if (pid != null) { // Attach to live process
if (exe != null) {
throw new SAGetoptException("Unnecessary argument: --exe");
} else if (core != null) {
throw new SAGetoptException("Unnecessary argument: --core");
- } else if (remote != NO_REMOTE) {
+ } else if (connect != NO_REMOTE) {
throw new SAGetoptException("Unnecessary argument: --connect");
} else if (!pid.matches("^\\d+$")) {
throw new SAGetoptException("Invalid pid: " + pid);
@@ -177,7 +189,7 @@
newArgs.add(pid);
} else if (exe != null) {
- if (remote != NO_REMOTE) {
+ if (connect != NO_REMOTE) {
throw new SAGetoptException("Unnecessary argument: --connect");
} else if (exe.length() == 0) {
throw new SAGetoptException("You have to set --exe.");
@@ -190,264 +202,144 @@
}
newArgs.add(core);
- } else if (remote != NO_REMOTE) {
- newArgs.add(remote);
+ } else if (connect != NO_REMOTE) {
+ newArgs.add(connect);
}
+
+ return newArgs.toArray(new String[0]);
+ }
+
+ /**
+ * This method converts jhsdb-style options (oldArgs) to the old-fashioned
+ * style. SALauncher delegates the work to the entry point of each tool,
+ * so the arguments need to be converted first.
+ * For example, `jhsdb jstack --mixed` needs to be converted to `jstack -m`.
+ *
+ * longOptsMap holds the rules for how this method should convert the args.
+ * The key is the jhsdb-style name and the value is the old-fashioned name.
+ * To convert the mixed option of jstack, put "mixed" as the key and "-m"
+ * as the value into longOptsMap. If the option takes a value, append "="
+ * to the key, as in "exe=".
+ *
+ * You can also add options that cannot be mapped directly to old-fashioned
+ * arguments. For example, `jhsdb jmap --binaryheap` has no direct `jmap`
+ * equivalent, but by putting it into longOptsMap we can detect that the
+ * user passed "binaryheap" and have SALauncher pass "-heap:format=b" to
+ * jmap.
+ *
+ * This method returns a map of the old-fashioned key/value pairs, which
+ * buildAttachArgs() uses to build the argument array.
+ */
+ private static Map<String, String> parseOptions(String[] oldArgs,
+ Map<String, String> longOptsMap) {
+ SAGetopt sg = new SAGetopt(oldArgs);
+ String[] longOpts = longOptsMap.keySet().toArray(new String[0]);
+ Map<String, String> newArgMap = new HashMap<>();
+
+ /*
+ * Parse each jhsdb-style option via SAGetopt.
+ * SAGetopt parses and validates each argument. If the user passes an
+ * invalid option, a SAGetoptException is thrown from SAGetopt::next,
+ * so there is no need to validate it here.
+ *
+ * The option value is obtained via SAGetopt::getOptarg. If a jhsdb-style
+ * option has '=' at its tail, the corresponding old-fashioned option is
+ * put into newArgMap together with that value.
+ */
+ String s;
+ while ((s = sg.next(null, longOpts)) != null) {
+ var val = longOptsMap.get(s);
+ if (val != null) {
+ newArgMap.put(val, null);
+ } else {
+ val = longOptsMap.get(s + "=");
+ if (val != null) {
+ newArgMap.put(val, sg.getOptarg());
+ }
+ }
+ }
+
+ return newArgMap;
}
private static void runCLHSDB(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid="};
-
- ArrayList<String> newArgs = new ArrayList();
- String pid = null;
- String exe = null;
- String core = null;
- String s = null;
-
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, NO_REMOTE, true);
- CLHSDB.main(newArgs.toArray(new String[newArgs.size()]));
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
+ CLHSDB.main(buildAttachArgs(newArgMap, true));
}
private static void runHSDB(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid="};
-
- ArrayList<String> newArgs = new ArrayList();
- String pid = null;
- String exe = null;
- String core = null;
- String s = null;
-
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, NO_REMOTE, true);
- HSDB.main(newArgs.toArray(new String[newArgs.size()]));
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
+ HSDB.main(buildAttachArgs(newArgMap, true));
}
private static void runJSTACK(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid=", "connect=",
- "mixed", "locks"};
-
- ArrayList<String> newArgs = new ArrayList();
- String pid = null;
- String exe = null;
- String core = null;
- String remote = NO_REMOTE;
- String s = null;
-
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- if (s.equals("connect")) {
- remote = sg.getOptarg();
- continue;
- }
- if (s.equals("mixed")) {
- newArgs.add("-m");
- continue;
- }
- if (s.equals("locks")) {
- newArgs.add("-l");
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, remote, false);
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid",
+ "connect=", "connect",
+ "mixed", "-m",
+ "locks", "-l");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
JStack jstack = new JStack(false, false);
- jstack.runWithArgs(newArgs.toArray(new String[newArgs.size()]));
+ jstack.runWithArgs(buildAttachArgs(newArgMap, false));
}
private static void runJMAP(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid=", "connect=",
- "heap", "binaryheap", "dumpfile=", "histo", "clstats", "finalizerinfo"};
-
- ArrayList<String> newArgs = new ArrayList();
- String pid = null;
- String exe = null;
- String core = null;
- String remote = NO_REMOTE;
- String s = null;
- String dumpfile = null;
- boolean requestHeapdump = false;
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid",
+ "connect=", "connect",
+ "heap", "-heap",
+ "binaryheap", "binaryheap",
+ "dumpfile=", "dumpfile",
+ "histo", "-histo",
+ "clstats", "-clstats",
+ "finalizerinfo", "-finalizerinfo");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- if (s.equals("connect")) {
- remote = sg.getOptarg();
- continue;
- }
- if (s.equals("heap")) {
- newArgs.add("-heap");
- continue;
- }
- if (s.equals("binaryheap")) {
- requestHeapdump = true;
- continue;
- }
- if (s.equals("dumpfile")) {
- dumpfile = sg.getOptarg();
- continue;
- }
- if (s.equals("histo")) {
- newArgs.add("-histo");
- continue;
- }
- if (s.equals("clstats")) {
- newArgs.add("-clstats");
- continue;
- }
- if (s.equals("finalizerinfo")) {
- newArgs.add("-finalizerinfo");
- continue;
+ boolean requestHeapdump = newArgMap.containsKey("binaryheap");
+ String dumpfile = newArgMap.get("dumpfile");
+ if (!requestHeapdump && (dumpfile != null)) {
+ throw new IllegalArgumentException("Unexpected argument: dumpfile");
+ }
+ if (requestHeapdump) {
+ if (dumpfile == null) {
+ newArgMap.put("-heap:format=b", null);
+ } else {
+ newArgMap.put("-heap:format=b,file=" + dumpfile, null);
}
}
- if (!requestHeapdump && (dumpfile != null)) {
- throw new IllegalArgumentException("Unexpected argument dumpfile");
- }
- if (requestHeapdump) {
- if (dumpfile == null) {
- newArgs.add("-heap:format=b");
- } else {
- newArgs.add("-heap:format=b,file=" + dumpfile);
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, remote, false);
- JMap.main(newArgs.toArray(new String[newArgs.size()]));
+ newArgMap.remove("binaryheap");
+ newArgMap.remove("dumpfile");
+ JMap.main(buildAttachArgs(newArgMap, false));
}
private static void runJINFO(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid=", "connect=",
- "flags", "sysprops"};
-
- ArrayList<String> newArgs = new ArrayList();
- String exe = null;
- String pid = null;
- String core = null;
- String remote = NO_REMOTE;
- String s = null;
-
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- if (s.equals("connect")) {
- remote = sg.getOptarg();
- continue;
- }
- if (s.equals("flags")) {
- newArgs.add("-flags");
- continue;
- }
- if (s.equals("sysprops")) {
- newArgs.add("-sysprops");
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, remote, false);
- JInfo.main(newArgs.toArray(new String[newArgs.size()]));
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid",
+ "connect=", "connect",
+ "flags", "-flags",
+ "sysprops", "-sysprops");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
+ JInfo.main(buildAttachArgs(newArgMap, false));
}
private static void runJSNAP(String[] oldArgs) {
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid=", "connect=", "all"};
-
- ArrayList<String> newArgs = new ArrayList();
- String exe = null;
- String pid = null;
- String core = null;
- String remote = NO_REMOTE;
- String s = null;
-
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- if (s.equals("connect")) {
- remote = sg.getOptarg();
- continue;
- }
- if (s.equals("all")) {
- newArgs.add("-a");
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, remote, false);
- JSnap.main(newArgs.toArray(new String[newArgs.size()]));
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid",
+ "connect=", "connect",
+ "all", "-a");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
+ JSnap.main(buildAttachArgs(newArgMap, false));
}
private static void runDEBUGD(String[] oldArgs) {
@@ -457,44 +349,34 @@
// attaching to SA agent.
System.setProperty("sun.jvm.hotspot.debugger.useWindbgDebugger", "true");
- SAGetopt sg = new SAGetopt(oldArgs);
- String[] longOpts = {"exe=", "core=", "pid=", "serverid="};
-
- ArrayList<String> newArgs = new ArrayList<>();
- String exe = null;
- String pid = null;
- String core = null;
- String s = null;
- String serverid = null;
+ Map<String, String> longOptsMap = Map.of("exe=", "exe",
+ "core=", "core",
+ "pid=", "pid",
+ "serverid=", "serverid");
+ Map<String, String> newArgMap = parseOptions(oldArgs, longOptsMap);
+ var serverid = newArgMap.remove("serverid");
+ List<String> newArgArray = new ArrayList<>();
+ newArgArray.addAll(Arrays.asList(buildAttachArgs(newArgMap, false)));
- while((s = sg.next(null, longOpts)) != null) {
- if (s.equals("exe")) {
- exe = sg.getOptarg();
- continue;
- }
- if (s.equals("core")) {
- core = sg.getOptarg();
- continue;
- }
- if (s.equals("pid")) {
- pid = sg.getOptarg();
- continue;
- }
- if (s.equals("serverid")) {
- serverid = sg.getOptarg();
- continue;
- }
- }
-
- buildAttachArgs(newArgs, pid, exe, core, NO_REMOTE, false);
+ // `serverid` must be located at the tail.
if (serverid != null) {
- newArgs.add(serverid);
+ newArgArray.add(serverid);
}
// delegate to the actual SA debug server.
- sun.jvm.hotspot.DebugServer.main(newArgs.toArray(new String[newArgs.size()]));
+ DebugServer.main(newArgArray.toArray(new String[0]));
}
+ // Key: tool name, Value: launcher method
+ private static Map<String, Consumer<String[]>> toolMap =
+ Map.of("clhsdb", SALauncher::runCLHSDB,
+ "hsdb", SALauncher::runHSDB,
+ "jstack", SALauncher::runJSTACK,
+ "jmap", SALauncher::runJMAP,
+ "jinfo", SALauncher::runJINFO,
+ "jsnap", SALauncher::runJSNAP,
+ "debugd", SALauncher::runDEBUGD);
+
public static void main(String[] args) {
// Provide a help
if (args.length == 0) {
@@ -517,44 +399,12 @@
String[] oldArgs = Arrays.copyOfRange(args, 1, args.length);
try {
- // Run SA interactive mode
- if (args[0].equals("clhsdb")) {
- runCLHSDB(oldArgs);
- return;
- }
-
- if (args[0].equals("hsdb")) {
- runHSDB(oldArgs);
- return;
- }
-
- // Run SA tmtools mode
- if (args[0].equals("jstack")) {
- runJSTACK(oldArgs);
- return;
+ var func = toolMap.get(args[0]);
+ if (func == null) {
+ throw new SAGetoptException("Unknown tool: " + args[0]);
+ } else {
+ func.accept(oldArgs);
}
-
- if (args[0].equals("jmap")) {
- runJMAP(oldArgs);
- return;
- }
-
- if (args[0].equals("jinfo")) {
- runJINFO(oldArgs);
- return;
- }
-
- if (args[0].equals("jsnap")) {
- runJSNAP(oldArgs);
- return;
- }
-
- if (args[0].equals("debugd")) {
- runDEBUGD(oldArgs);
- return;
- }
-
- throw new SAGetoptException("Unknown tool: " + args[0]);
} catch (SAGetoptException e) {
System.err.println(e.getMessage());
toolHelp(args[0]);
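
As a worked example of the new option mapping (illustrative, not part of the patch): for `jhsdb jstack --mixed --pid 1234`, parseOptions turns the command line into {"-m" -> null, "pid" -> "1234"} using the jstack rule table, and buildAttachArgs then validates the pid and emits ["-m", "1234"], the form the legacy JStack entry point expects. A tiny standalone snippet showing the same rule table (parseOptions and buildAttachArgs themselves are private to SALauncher):

import java.util.Map;

public class JhsdbMappingExample {
    public static void main(String[] args) {
        Map<String, String> longOptsMap = Map.of("exe=", "exe",
                                                 "core=", "core",
                                                 "pid=", "pid",
                                                 "connect=", "connect",
                                                 "mixed", "-m",
                                                 "locks", "-l");
        // `jhsdb jstack --mixed --pid 1234`:
        //   parseOptions(...)    yields {"-m" -> null, "pid" -> "1234"}
        //   buildAttachArgs(...) yields ["-m", "1234"], with the pid appended last
        System.out.println(longOptsMap.get("mixed"));   // -m
        System.out.println(longOptsMap.get("pid="));    // pid
    }
}
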
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Mon Aug 19 21:14:34 2019 -0400
@@ -32,12 +32,6 @@
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
-/** Mark is the analogue of the VM's markOop. In this system it does
- not subclass Oop but VMObject. For a mark on the stack, the mark's
- address will be an Address; for a mark in the header of an object,
- it will be an OopHandle. It is assumed in a couple of places in
- this code that the mark is the first word in an object. */
-
public class Mark extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
@@ -51,39 +45,39 @@
Type type = db.lookupType("oopDesc");
markField = type.getCIntegerField("_mark");
- ageBits = db.lookupLongConstant("markOopDesc::age_bits").longValue();
- lockBits = db.lookupLongConstant("markOopDesc::lock_bits").longValue();
- biasedLockBits = db.lookupLongConstant("markOopDesc::biased_lock_bits").longValue();
- maxHashBits = db.lookupLongConstant("markOopDesc::max_hash_bits").longValue();
- hashBits = db.lookupLongConstant("markOopDesc::hash_bits").longValue();
- lockShift = db.lookupLongConstant("markOopDesc::lock_shift").longValue();
- biasedLockShift = db.lookupLongConstant("markOopDesc::biased_lock_shift").longValue();
- ageShift = db.lookupLongConstant("markOopDesc::age_shift").longValue();
- hashShift = db.lookupLongConstant("markOopDesc::hash_shift").longValue();
- lockMask = db.lookupLongConstant("markOopDesc::lock_mask").longValue();
- lockMaskInPlace = db.lookupLongConstant("markOopDesc::lock_mask_in_place").longValue();
- biasedLockMask = db.lookupLongConstant("markOopDesc::biased_lock_mask").longValue();
- biasedLockMaskInPlace = db.lookupLongConstant("markOopDesc::biased_lock_mask_in_place").longValue();
- biasedLockBitInPlace = db.lookupLongConstant("markOopDesc::biased_lock_bit_in_place").longValue();
- ageMask = db.lookupLongConstant("markOopDesc::age_mask").longValue();
- ageMaskInPlace = db.lookupLongConstant("markOopDesc::age_mask_in_place").longValue();
- hashMask = db.lookupLongConstant("markOopDesc::hash_mask").longValue();
- hashMaskInPlace = db.lookupLongConstant("markOopDesc::hash_mask_in_place").longValue();
- biasedLockAlignment = db.lookupLongConstant("markOopDesc::biased_lock_alignment").longValue();
- lockedValue = db.lookupLongConstant("markOopDesc::locked_value").longValue();
- unlockedValue = db.lookupLongConstant("markOopDesc::unlocked_value").longValue();
- monitorValue = db.lookupLongConstant("markOopDesc::monitor_value").longValue();
- markedValue = db.lookupLongConstant("markOopDesc::marked_value").longValue();
- biasedLockPattern = db.lookupLongConstant("markOopDesc::biased_lock_pattern").longValue();
- noHash = db.lookupLongConstant("markOopDesc::no_hash").longValue();
- noHashInPlace = db.lookupLongConstant("markOopDesc::no_hash_in_place").longValue();
- noLockInPlace = db.lookupLongConstant("markOopDesc::no_lock_in_place").longValue();
- maxAge = db.lookupLongConstant("markOopDesc::max_age").longValue();
+ ageBits = db.lookupLongConstant("markWord::age_bits").longValue();
+ lockBits = db.lookupLongConstant("markWord::lock_bits").longValue();
+ biasedLockBits = db.lookupLongConstant("markWord::biased_lock_bits").longValue();
+ maxHashBits = db.lookupLongConstant("markWord::max_hash_bits").longValue();
+ hashBits = db.lookupLongConstant("markWord::hash_bits").longValue();
+ lockShift = db.lookupLongConstant("markWord::lock_shift").longValue();
+ biasedLockShift = db.lookupLongConstant("markWord::biased_lock_shift").longValue();
+ ageShift = db.lookupLongConstant("markWord::age_shift").longValue();
+ hashShift = db.lookupLongConstant("markWord::hash_shift").longValue();
+ lockMask = db.lookupLongConstant("markWord::lock_mask").longValue();
+ lockMaskInPlace = db.lookupLongConstant("markWord::lock_mask_in_place").longValue();
+ biasedLockMask = db.lookupLongConstant("markWord::biased_lock_mask").longValue();
+ biasedLockMaskInPlace = db.lookupLongConstant("markWord::biased_lock_mask_in_place").longValue();
+ biasedLockBitInPlace = db.lookupLongConstant("markWord::biased_lock_bit_in_place").longValue();
+ ageMask = db.lookupLongConstant("markWord::age_mask").longValue();
+ ageMaskInPlace = db.lookupLongConstant("markWord::age_mask_in_place").longValue();
+ hashMask = db.lookupLongConstant("markWord::hash_mask").longValue();
+ hashMaskInPlace = db.lookupLongConstant("markWord::hash_mask_in_place").longValue();
+ biasedLockAlignment = db.lookupLongConstant("markWord::biased_lock_alignment").longValue();
+ lockedValue = db.lookupLongConstant("markWord::locked_value").longValue();
+ unlockedValue = db.lookupLongConstant("markWord::unlocked_value").longValue();
+ monitorValue = db.lookupLongConstant("markWord::monitor_value").longValue();
+ markedValue = db.lookupLongConstant("markWord::marked_value").longValue();
+ biasedLockPattern = db.lookupLongConstant("markWord::biased_lock_pattern").longValue();
+ noHash = db.lookupLongConstant("markWord::no_hash").longValue();
+ noHashInPlace = db.lookupLongConstant("markWord::no_hash_in_place").longValue();
+ noLockInPlace = db.lookupLongConstant("markWord::no_lock_in_place").longValue();
+ maxAge = db.lookupLongConstant("markWord::max_age").longValue();
- /* Constants in markOop used by CMS. */
- cmsShift = db.lookupLongConstant("markOopDesc::cms_shift").longValue();
- cmsMask = db.lookupLongConstant("markOopDesc::cms_mask").longValue();
- sizeShift = db.lookupLongConstant("markOopDesc::size_shift").longValue();
+ /* Constants in markWord used by CMS. */
+ cmsShift = db.lookupLongConstant("markWord::cms_shift").longValue();
+ cmsMask = db.lookupLongConstant("markWord::cms_mask").longValue();
+ sizeShift = db.lookupLongConstant("markWord::size_shift").longValue();
}
// Field accessors
@@ -125,7 +119,7 @@
private static long maxAge;
- /* Constants in markOop used by CMS. */
+ /* Constants in markWord used by CMS. */
private static long cmsShift;
private static long cmsMask;
private static long sizeShift;
@@ -175,7 +169,7 @@
return (Bits.maskBitsLong(value(), lockMaskInPlace) == markedValue);
}
- // Special temporary state of the markOop while being inflated.
+ // Special temporary state of the markWord while being inflated.
// Code that looks at mark outside a lock need to take this into account.
public boolean isBeingInflated() {
return (value() == 0);
@@ -188,12 +182,8 @@
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
- // They must get updated if markOop layout get changed.
+ // They must get updated if the markWord layout gets changed.
- // FIXME
- // markOop set_unlocked() const {
- // return markOop(value() | unlocked_value);
- // }
public boolean hasLocker() {
return ((value() & lockMaskInPlace) == lockedValue);
}
@@ -224,44 +214,7 @@
Address addr = valueAsAddress().andWithMask(~monitorValue);
return new Mark(addr.getAddressAt(0));
}
- // FIXME
- // void set_displaced_mark_helper(markOop m) const {
- // assert(has_displaced_mark_helper(), "check");
- // intptr_t ptr = (value() & ~monitor_value);
- // *(markOop*)ptr = m;
- // }
- // markOop copy_set_hash(intptr_t hash) const {
- // intptr_t tmp = value() & (~hash_mask_in_place);
- // tmp |= ((hash & hash_mask) << hash_shift);
- // return (markOop)tmp;
- // }
- // it is only used to be stored into BasicLock as the
- // indicator that the lock is using heavyweight monitor
- // static markOop unused_mark() {
- // return (markOop) marked_value;
- // }
- // // the following two functions create the markOop to be
- // // stored into object header, it encodes monitor info
- // static markOop encode(BasicLock* lock) {
- // return (markOop) lock;
- // }
- // static markOop encode(ObjectMonitor* monitor) {
- // intptr_t tmp = (intptr_t) monitor;
- // return (markOop) (tmp | monitor_value);
- // }
- // used for alignment-based marking to reuse the busy state to encode pointers
- // (see markOop_alignment.hpp)
- // markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
- //
- // // age operations
- // markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
- //
public int age() { return (int) Bits.maskBitsLong(value() >> ageShift, ageMask); }
- // markOop set_age(int v) const {
- // assert((v & ~age_mask) == 0, "shouldn't overflow age field");
- // return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
- // }
- // markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
// hash operations
public long hash() {
@@ -272,12 +225,6 @@
return hash() == noHash;
}
- // FIXME
- // Prototype mark for initialization
- // static markOop prototype() {
- // return markOop( no_hash_in_place | no_lock_in_place );
- // }
-
// Debugging
public void printOn(PrintStream tty) {
if (isLocked()) {
@@ -294,14 +241,7 @@
}
}
- // FIXME
- // // Prepare address of oop for placement into mark
- // inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
- //
- // // Recover address of oop from encoded form used in mark
- // inline void* decode_pointer() { return clear_lock_bits(); }
-
- // Copy markOop methods for CMS here.
+ // Copy markWord methods for CMS here.
public boolean isCmsFreeChunk() {
return isUnlocked() &&
(Bits.maskBitsLong(value() >> cmsShift, cmsMask) & 0x1L) == 0x1L;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java Mon Aug 19 21:14:34 2019 -0400
@@ -64,7 +64,7 @@
}
// FIXME
- // void set_header(markOop hdr);
+ // void set_header(markWord hdr);
// FIXME: must implement and delegate to platform-dependent implementation
// public boolean isBusy();
--- a/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/IntByReference.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/IntByReference.java Mon Aug 19 21:14:34 2019 -0400
@@ -4,7 +4,9 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/Kernel32Impl.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/Kernel32Impl.java Mon Aug 19 21:14:34 2019 -0400
@@ -4,7 +4,9 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/LastErrorException.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/LastErrorException.java Mon Aug 19 21:14:34 2019 -0400
@@ -4,7 +4,9 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/Pointer.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.le/windows/classes/jdk/internal/org/jline/terminal/impl/jna/win/Pointer.java Mon Aug 19 21:14:34 2019 -0400
@@ -4,7 +4,9 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Mon Aug 19 21:14:34 2019 -0400
@@ -73,7 +73,7 @@
final int hubOffset = getFieldOffset("oopDesc::_metadata._klass", Integer.class, "Klass*");
- final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markOop");
+ final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markWord");
final int subklassOffset = getFieldOffset("Klass::_subklass", Integer.class, "Klass*");
final int superOffset = getFieldOffset("Klass::_super", Integer.class, "Klass*");
final int nextSiblingOffset = getFieldOffset("Klass::_next_sibling", Integer.class, "Klass*");
@@ -142,11 +142,11 @@
// This is only valid on AMD64.
final int runtimeCallStackSize = getConstant("frame::arg_reg_save_area_bytes", Integer.class, osArch.equals("amd64") ? null : 0);
- private final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
- private final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+ private final int markWordNoHashInPlace = getConstant("markWord::no_hash_in_place", Integer.class);
+ private final int markWordNoLockInPlace = getConstant("markWord::no_lock_in_place", Integer.class);
/**
- * See {@code markOopDesc::prototype()}.
+ * See {@code markWord::prototype()}.
*/
long arrayPrototypeMarkWord() {
return markWordNoHashInPlace | markWordNoLockInPlace;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Mon Aug 19 21:14:34 2019 -0400
@@ -208,10 +208,10 @@
public final int stackBias = getConstant("STACK_BIAS", Integer.class);
public final int vmPageSize = getFieldValue("CompilerToVM::Data::vm_page_size", Integer.class, "int");
- public final int markOffset = getFieldOffset("oopDesc::_mark", Integer.class, "markOop");
+ public final int markOffset = getFieldOffset("oopDesc::_mark", Integer.class, "markWord");
public final int hubOffset = getFieldOffset("oopDesc::_metadata._klass", Integer.class, "Klass*");
- public final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markOop");
+ public final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markWord");
public final int subklassOffset = getFieldOffset("Klass::_subklass", Integer.class, "Klass*");
public final int nextSiblingOffset = getFieldOffset("Klass::_next_sibling", Integer.class, "Klass*");
public final int superCheckOffsetOffset = getFieldOffset("Klass::_super_check_offset", Integer.class, "juint");
@@ -445,17 +445,17 @@
public final int osThreadInterruptedOffset = getFieldOffset("OSThread::_interrupted", Integer.class, "jint");
- public final long markOopDescHashShift = getConstant("markOopDesc::hash_shift", Long.class);
+ public final long markWordHashShift = getConstant("markWord::hash_shift", Long.class);
- public final int biasedLockMaskInPlace = getConstant("markOopDesc::biased_lock_mask_in_place", Integer.class);
- public final int ageMaskInPlace = getConstant("markOopDesc::age_mask_in_place", Integer.class);
- public final int epochMaskInPlace = getConstant("markOopDesc::epoch_mask_in_place", Integer.class);
- public final long markOopDescHashMask = getConstant("markOopDesc::hash_mask", Long.class);
- public final long markOopDescHashMaskInPlace = getConstant("markOopDesc::hash_mask_in_place", Long.class);
+ public final int biasedLockMaskInPlace = getConstant("markWord::biased_lock_mask_in_place", Integer.class);
+ public final int ageMaskInPlace = getConstant("markWord::age_mask_in_place", Integer.class);
+ public final int epochMaskInPlace = getConstant("markWord::epoch_mask_in_place", Integer.class);
+ public final long markWordHashMask = getConstant("markWord::hash_mask", Long.class);
+ public final long markWordHashMaskInPlace = getConstant("markWord::hash_mask_in_place", Long.class);
- public final int unlockedMask = getConstant("markOopDesc::unlocked_value", Integer.class);
- public final int monitorMask = getConstant("markOopDesc::monitor_value", Integer.class, -1);
- public final int biasedLockPattern = getConstant("markOopDesc::biased_lock_pattern", Integer.class);
+ public final int unlockedMask = getConstant("markWord::unlocked_value", Integer.class);
+ public final int monitorMask = getConstant("markWord::monitor_value", Integer.class, -1);
+ public final int biasedLockPattern = getConstant("markWord::biased_lock_pattern", Integer.class);
// This field has no type in vmStructs.cpp
public final int objectMonitorOwner = getFieldOffset("ObjectMonitor::_owner", Integer.class, null, -1);
@@ -464,34 +464,34 @@
public final int objectMonitorEntryList = getFieldOffset("ObjectMonitor::_EntryList", Integer.class, "ObjectWaiter*", -1);
public final int objectMonitorSucc = getFieldOffset("ObjectMonitor::_succ", Integer.class, "Thread*", -1);
- public final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
- public final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+ public final int markWordNoHashInPlace = getConstant("markWord::no_hash_in_place", Integer.class);
+ public final int markWordNoLockInPlace = getConstant("markWord::no_lock_in_place", Integer.class);
/**
- * See {@code markOopDesc::prototype()}.
+ * See {@code markWord::prototype()}.
*/
public long arrayPrototypeMarkWord() {
return markWordNoHashInPlace | markWordNoLockInPlace;
}
/**
- * See {@code markOopDesc::copy_set_hash()}.
+ * See {@code markWord::copy_set_hash()}.
*/
public long tlabIntArrayMarkWord() {
- long tmp = arrayPrototypeMarkWord() & (~markOopDescHashMaskInPlace);
- tmp |= ((0x2 & markOopDescHashMask) << markOopDescHashShift);
+ long tmp = arrayPrototypeMarkWord() & (~markWordHashMaskInPlace);
+ tmp |= ((0x2 & markWordHashMask) << markWordHashShift);
return tmp;
}
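
For context, here is a rough standalone sketch of the bit manipulation tlabIntArrayMarkWord() performs, using assumed 64-bit values (markWord::hash_shift == 8, markWord::hash_mask == 0x7FFFFFFF; the resulting hash_mask_in_place, 549755813632, matches the value quoted in the ClhsdbLongConstant test further down). The prototype value is a placeholder, not read from the VM:

public class MarkWordHashSketch {
    public static void main(String[] args) {
        long hashShift       = 8;                     // assumed markWord::hash_shift on 64-bit
        long hashMask        = 0x7FFFFFFFL;           // assumed markWord::hash_mask
        long hashMaskInPlace = hashMask << hashShift; // 549755813632
        long prototype       = 0x1L;                  // placeholder for arrayPrototypeMarkWord()
        long word = (prototype & ~hashMaskInPlace)    // clear any existing hash bits
                  | ((0x2L & hashMask) << hashShift); // plant identity hash 2 -> adds 0x200
        System.out.println(Long.toHexString(word));   // prints "201" with these placeholder inputs
    }
}
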
/**
* Mark word right shift to get identity hash code.
*/
- public final int identityHashCodeShift = getConstant("markOopDesc::hash_shift", Integer.class);
+ public final int identityHashCodeShift = getConstant("markWord::hash_shift", Integer.class);
/**
* Identity hash code value when uninitialized.
*/
- public final int uninitializedIdentityHashCodeValue = getConstant("markOopDesc::no_hash", Integer.class);
+ public final int uninitializedIdentityHashCodeValue = getConstant("markWord::no_hash", Integer.class);
public final int methodAccessFlagsOffset = getFieldOffset("Method::_access_flags", Integer.class, "AccessFlags");
public final int methodConstMethodOffset = getFieldOffset("Method::_constMethod", Integer.class, "ConstMethod*");
@@ -565,7 +565,7 @@
public final int arrayKlassOffset = getFieldValue("java_lang_Class::_array_klass_offset", Integer.class, "int");
public final int basicLockSize = getFieldValue("CompilerToVM::Data::sizeof_BasicLock", Integer.class, "int");
- public final int basicLockDisplacedHeaderOffset = getFieldOffset("BasicLock::_displaced_header", Integer.class, "markOop");
+ public final int basicLockDisplacedHeaderOffset = getFieldOffset("BasicLock::_displaced_header", Integer.class, "markWord");
public final int threadPollingPageOffset = getFieldOffset("Thread::_polling_page", Integer.class, "address", -1);
public final int threadAllocatedBytesOffset = getFieldOffset("Thread::_allocated_bytes", Integer.class, "jlong");
--- a/src/jdk.jartool/share/classes/sun/security/tools/jarsigner/Main.java Mon Aug 19 20:31:10 2019 -0400
+++ b/src/jdk.jartool/share/classes/sun/security/tools/jarsigner/Main.java Mon Aug 19 21:14:34 2019 -0400
@@ -685,6 +685,12 @@
Vector<JarEntry> entriesVec = new Vector<>();
byte[] buffer = new byte[8192];
+ String suffix1 = "-Digest-Manifest";
+ String suffix2 = "-Digest-" + ManifestDigester.MF_MAIN_ATTRS;
+
+ int suffixLength1 = suffix1.length();
+ int suffixLength2 = suffix2.length();
+
Enumeration<JarEntry> entries = jf.entries();
while (entries.hasMoreElements()) {
JarEntry je = entries.nextElement();
@@ -701,9 +707,14 @@
boolean found = false;
for (Object obj : sf.getMainAttributes().keySet()) {
String key = obj.toString();
- if (key.endsWith("-Digest-Manifest")) {
- digestMap.put(alias,
- key.substring(0, key.length() - 16));
+ if (key.endsWith(suffix1)) {
+ digestMap.put(alias, key.substring(
+ 0, key.length() - suffixLength1));
+ found = true;
+ break;
+ } else if (key.endsWith(suffix2)) {
+ digestMap.put(alias, key.substring(
+ 0, key.length() - suffixLength2));
found = true;
break;
}
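
The intent of the two-suffix match above is to record, per signer, the algorithm prefix of whichever digest main attribute is present, instead of hard-coding the old 16-character suffix length. A minimal sketch of the stripping (the key "SHA-256-Digest-Manifest" is illustrative, not taken from the patch):

String key = "SHA-256-Digest-Manifest";   // hypothetical signature-file main attribute name
String suffix = "-Digest-Manifest";
if (key.endsWith(suffix)) {
    String algorithm = key.substring(0, key.length() - suffix.length());
    // algorithm == "SHA-256"; suffix.length() replaces the former hard-coded 16
}
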
--- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -42,17 +42,19 @@
FakeOop() : _oop() { _oop.set_mark_raw(originalMark()); }
oop get_oop() { return &_oop; }
- markOop mark() { return _oop.mark_raw(); }
- void set_mark(markOop m) { _oop.set_mark_raw(m); }
+ markWord mark() { return _oop.mark_raw(); }
+ void set_mark(markWord m) { _oop.set_mark_raw(m); }
void forward_to(oop obj) {
- markOop m = markOopDesc::encode_pointer_as_mark(obj);
+ markWord m = markWord::encode_pointer_as_mark(obj);
_oop.set_mark_raw(m);
}
- static markOop originalMark() { return markOop(markOopDesc::lock_mask_in_place); }
- static markOop changedMark() { return markOop(0x4711); }
+ static markWord originalMark() { return markWord(markWord::lock_mask_in_place); }
+ static markWord changedMark() { return markWord(0x4711); }
};
+#define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value())
+
TEST_VM(PreservedMarks, iterate_and_restore) {
// Need to disable biased locking to easily
// create oops that "must_be_preseved"
@@ -65,16 +67,16 @@
FakeOop o4;
// Make sure initial marks are correct.
- ASSERT_EQ(o1.mark(), FakeOop::originalMark());
- ASSERT_EQ(o2.mark(), FakeOop::originalMark());
- ASSERT_EQ(o3.mark(), FakeOop::originalMark());
- ASSERT_EQ(o4.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::originalMark());
// Change the marks and verify change.
o1.set_mark(FakeOop::changedMark());
o2.set_mark(FakeOop::changedMark());
- ASSERT_EQ(o1.mark(), FakeOop::changedMark());
- ASSERT_EQ(o2.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::changedMark());
// Push o1 and o2 to have their marks preserved.
pm.push(o1.get_oop(), o1.mark());
@@ -92,6 +94,6 @@
// Restore all preserved and verify that the changed
// mark is now present at o3 and o4.
pm.restore();
- ASSERT_EQ(o3.mark(), FakeOop::changedMark());
- ASSERT_EQ(o4.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::changedMark());
}
--- a/test/hotspot/gtest/oops/test_markOop.cpp Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/gtest/oops/test_markOop.cpp Mon Aug 19 21:14:34 2019 -0400
@@ -79,7 +79,7 @@
};
-TEST_VM(markOopDesc, printing) {
+TEST_VM(markWord, printing) {
JavaThread* THREAD = JavaThread::current();
ThreadInVMfromNative invm(THREAD);
ResourceMark rm(THREAD);
@@ -98,10 +98,10 @@
// Lock using biased locking.
BasicObjectLock lock;
lock.set_obj(obj);
- markOop mark = obj->mark()->incr_bias_epoch();
+ markWord mark = obj->mark().incr_bias_epoch();
obj->set_mark(mark);
ObjectSynchronizer::fast_enter(h_obj, lock.lock(), true, THREAD);
- // Look for the biased_locker in markOop, not prototype_header.
+ // Look for the biased_locker in markWord, not prototype_header.
#ifdef _LP64
assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x0000000000000000");
#else
--- a/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java Mon Aug 19 21:14:34 2019 -0400
@@ -56,7 +56,7 @@
if (GC.Parallel.isSupported()) {
noneGCSupported = false;
- testInitialGCThreadLogging("UseParallelGC", "ParGC Thread");
+ testInitialGCThreadLogging("UseParallelGC", "GC Thread");
}
if (GC.Shenandoah.isSupported()) {
--- a/test/hotspot/jtreg/gc/shenandoah/compiler/TestWriteBarrierClearControl.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestWriteBarrierClearControl.java Mon Aug 19 21:14:34 2019 -0400
@@ -26,6 +26,7 @@
* @summary Clearing control during final graph reshape causes memory barrier to loose dependency on null check
* @key gc
* @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires vm.flavor == "server"
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
* -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
* -XX:+UnlockDiagnosticVMOptions -XX:+StressLCM -XX:+StressGCM
--- a/test/hotspot/jtreg/gc/shenandoah/options/TestLoopMiningArguments.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestLoopMiningArguments.java Mon Aug 19 21:14:34 2019 -0400
@@ -26,6 +26,7 @@
* @summary Test that loop mining arguments are sane
* @key gc
* @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires vm.flavor == "server"
* @library /test/lib
* @run driver TestLoopMiningArguments
*/
--- a/test/hotspot/jtreg/runtime/cds/appcds/RewriteBytecodesTest.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/runtime/cds/appcds/RewriteBytecodesTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -24,7 +24,7 @@
/*
* @test
- * @summary Use ClassLoader.defineClass() to load a class with rewritten bytecode. Make sure
+ * @summary Use Lookup.defineClass() to load a class with rewritten bytecode. Make sure
* the archived class with the same name is not loaded.
* @requires vm.cds
* @library /test/lib
@@ -52,7 +52,6 @@
OutputAnalyzer output = TestCommon.exec(appJar,
// command-line arguments ...
- "--add-opens=java.base/java.lang=ALL-UNNAMED",
use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
--- a/test/hotspot/jtreg/runtime/cds/appcds/customLoader/LoaderSegregationTest.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/runtime/cds/appcds/customLoader/LoaderSegregationTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -63,7 +63,7 @@
String wbJar = JarBuilder.build(true, "WhiteBox", "sun/hotspot/WhiteBox");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
- String appJar = JarBuilder.build("LoaderSegregation_app", "LoaderSegregation",
+ String appJar = JarBuilder.build("LoaderSegregation_app", "LoaderSegregation", "LoaderSegregation$1",
"CustomLoadee", "CustomLoadee2", "CustomLoadee3Child", "CustomInterface2_ia",
"OnlyBuiltin", "Util");
@@ -110,8 +110,6 @@
output = TestCommon.exec(TestCommon.concatPaths(appJar, app2Jar),
// command-line arguments ...
- "--add-opens=java.base/java.lang=ALL-UNNAMED",
- "--add-opens=java.base/java.security=ALL-UNNAMED",
use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
--- a/test/hotspot/jtreg/runtime/cds/appcds/customLoader/test-classes/LoaderSegregation.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/runtime/cds/appcds/customLoader/test-classes/LoaderSegregation.java Mon Aug 19 21:14:34 2019 -0400
@@ -81,8 +81,25 @@
}
{ // UNREGISTERED LOADER
- URLClassLoader urlClassLoader = new URLClassLoader(urls);
- Class c2 = Util.defineClassFromJAR(urlClassLoader, jarFile, ONLY_BUILTIN);
+ URLClassLoader urlClassLoader = new URLClassLoader(urls) {
+ protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
+ synchronized (getClassLoadingLock(name)) {
+ Class<?> c = findLoadedClass(name);
+ if (c == null) {
+ try {
+ c = findClass(name);
+ } catch (ClassNotFoundException e) {
+ c = getParent().loadClass(name);
+ }
+ }
+ if (resolve) {
+ resolveClass(c);
+ }
+ return c;
+ }
+ }
+ };
+ Class<?> c2 = urlClassLoader.loadClass(ONLY_BUILTIN);
if (c2.getClassLoader() != urlClassLoader) {
throw new RuntimeException("Error in test");
--- a/test/hotspot/jtreg/runtime/cds/appcds/test-classes/RewriteBytecodes.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/runtime/cds/appcds/test-classes/RewriteBytecodes.java Mon Aug 19 21:14:34 2019 -0400
@@ -23,6 +23,7 @@
*/
import java.io.File;
+import java.lang.invoke.MethodHandles;
import sun.hotspot.WhiteBox;
public class RewriteBytecodes {
@@ -30,7 +31,7 @@
String from = "___xxx___";
String to = "___yyy___";
File clsFile = new File(args[0]);
- Class superClass = Util.defineModifiedClass(RewriteBytecodes.class.getClassLoader(), clsFile, from, to);
+ Class superClass = Util.defineModifiedClass(MethodHandles.lookup(), clsFile, from, to);
Child child = new Child();
--- a/test/hotspot/jtreg/runtime/cds/appcds/test-classes/Util.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/runtime/cds/appcds/test-classes/Util.java Mon Aug 19 21:14:34 2019 -0400
@@ -23,19 +23,21 @@
*/
import java.io.*;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodHandles.Lookup;
import java.lang.reflect.*;
import java.util.jar.*;
public class Util {
/**
- * Invoke the loader.defineClass() class method to define the class stored in clsFile,
+ * Define the class as stored in clsFile with the provided lookup instance,
* with the following modification:
* <ul>
* <li> All ASCII strings in the class file bytes that matches fromString will be replaced with toString.
* NOTE: the two strings must be the exact same length.
* </ul>
*/
- public static Class defineModifiedClass(ClassLoader loader, File clsFile, String fromString, String toString)
+ public static Class<?> defineModifiedClass(Lookup lookup, File clsFile, String fromString, String toString)
throws FileNotFoundException, IOException, NoSuchMethodException, IllegalAccessException,
InvocationTargetException
{
@@ -46,14 +48,11 @@
System.out.println("Loading from: " + clsFile + " (" + buff.length + " bytes)");
- Method defineClass = ClassLoader.class.getDeclaredMethod("defineClass",
- buff.getClass(), int.class, int.class);
- defineClass.setAccessible(true);
- // We directly call into ClassLoader.defineClass() to define the "Super" class. Also,
+ // We directly call into Lookup.defineClass() to define the "Super" class. Also,
// rewrite its classfile so that it returns ___yyy___ instead of ___xxx___. Changing the
// classfile will guarantee that this class will NOT be loaded from the CDS archive.
- Class cls = (Class)defineClass.invoke(loader, buff, new Integer(0), new Integer(buff.length));
+ Class<?> cls = lookup.defineClass(buff);
System.out.println("Loaded : " + cls);
return cls;
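
Callers now pass a Lookup that is allowed to define the bytes: Lookup.defineClass requires PACKAGE access and defines the class in the same runtime package as the lookup class. A usage sketch mirroring the RewriteBytecodes change above (the file name is illustrative):

Class<?> superClass = Util.defineModifiedClass(
        MethodHandles.lookup(), new File("Super.class"), "___xxx___", "___yyy___");
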
@@ -105,44 +104,6 @@
return b;
}
- public static Class defineClassFromJAR(ClassLoader loader, File jarFile, String className)
- throws FileNotFoundException, IOException, NoSuchMethodException, IllegalAccessException,
- InvocationTargetException {
- return defineClassFromJAR(loader, jarFile, className, null, null);
- }
-
- /**
- * Invoke the loader.defineClass() class method to define the named class stored in a JAR file.
- *
- * If a class exists both in the classpath, as well as in the list of URLs of a URLClassLoader,
- * by default, the URLClassLoader will not define the class, and instead will delegate to the
- * app loader. This method is an easy way to force the class to be defined by the URLClassLoader.
- *
- * Optionally, you can modify the contents of the classfile buffer. See comments in
- * defineModifiedClass.
- */
- public static Class defineClassFromJAR(ClassLoader loader, File jarFile, String className,
- String fromString, String toString)
- throws FileNotFoundException, IOException, NoSuchMethodException, IllegalAccessException,
- InvocationTargetException
- {
- byte[] buff = getClassFileFromJar(jarFile, className);
-
- if (fromString != null) {
- replace(buff, fromString, toString);
- }
-
- //System.out.println("Loading from: " + ent + " (" + buff.length + " bytes)");
-
- Method defineClass = ClassLoader.class.getDeclaredMethod("defineClass",
- buff.getClass(), int.class, int.class);
- defineClass.setAccessible(true);
- Class cls = (Class)defineClass.invoke(loader, buff, new Integer(0), new Integer(buff.length));
-
- //System.out.println("Loaded : " + cls);
- return cls;
- }
-
public static byte[] getClassFileFromJar(File jarFile, String className) throws FileNotFoundException, IOException {
JarFile jf = new JarFile(jarFile);
JarEntry ent = jf.getJarEntry(className.replace('.', '/') + ".class");
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java Mon Aug 19 21:14:34 2019 -0400
@@ -56,7 +56,7 @@
"detach",
"universe",
"reattach",
- "longConstant markOopDesc::locked_value");
+ "longConstant markWord::locked_value");
Map<String, List<String>> expStrMap = new HashMap<>();
expStrMap.put("where", List.of(
@@ -65,8 +65,8 @@
"MaxJavaStackTraceDepth = "));
expStrMap.put("universe", List.of(
"Command not valid until attached to a VM"));
- expStrMap.put("longConstant markOopDesc::locked_value", List.of(
- "longConstant markOopDesc::locked_value"));
+ expStrMap.put("longConstant markWord::locked_value", List.of(
+ "longConstant markWord::locked_value"));
test.run(-1, cmds, expStrMap, null);
} catch (SkippedException se) {
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbFindPC.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbFindPC.java Mon Aug 19 21:14:34 2019 -0400
@@ -45,8 +45,13 @@
LingeredApp theApp = null;
try {
ClhsdbLauncher test = new ClhsdbLauncher();
- theApp = withXcomp ? LingeredApp.startApp(List.of("-Xcomp"))
- : LingeredApp.startApp(List.of("-Xint"));
+
+ theApp = new LingeredAppWithTrivialMain();
+ if (withXcomp) {
+ LingeredApp.startApp(List.of("-Xcomp"), theApp);
+ } else {
+ LingeredApp.startApp(List.of("-Xint"), theApp);
+ }
System.out.print("Started LingeredApp ");
if (withXcomp) {
System.out.print("(-Xcomp) ");
@@ -67,7 +72,7 @@
// attach permission issues.
if (output != null) {
String cmdStr = null;
- String[] parts = output.split("LingeredApp.main");
+ String[] parts = output.split("LingeredAppWithTrivialMain.main");
String[] tokens = parts[1].split(" ");
for (String token : tokens) {
if (token.contains("pc")) {
@@ -82,7 +87,7 @@
Map<String, List<String>> expStrMap = new HashMap<>();
if (withXcomp) {
expStrMap.put(cmdStr, List.of(
- "In code in NMethod for jdk/test/lib/apps/LingeredApp.main",
+ "In code in NMethod for LingeredAppWithTrivialMain.main",
"content:",
"oops:",
"frame size:"));
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Mon Aug 19 21:14:34 2019 -0400
@@ -51,21 +51,21 @@
List<String> cmds = List.of(
"longConstant",
- "longConstant markOopDesc::locked_value",
- "longConstant markOopDesc::lock_bits",
+ "longConstant markWord::locked_value",
+ "longConstant markWord::lock_bits",
"longConstant jtreg::test 6",
"longConstant jtreg::test");
Map<String, List<String>> expStrMap = new HashMap<>();
expStrMap.put("longConstant", List.of(
- "longConstant markOopDesc::locked_value",
- "longConstant markOopDesc::lock_bits",
+ "longConstant markWord::locked_value",
+ "longConstant markWord::lock_bits",
"InvocationCounter::count_increment",
- "markOopDesc::epoch_mask_in_place"));
- expStrMap.put("longConstant markOopDesc::locked_value", List.of(
- "longConstant markOopDesc::locked_value"));
- expStrMap.put("longConstant markOopDesc::lock_bits", List.of(
- "longConstant markOopDesc::lock_bits"));
+ "markWord::epoch_mask_in_place"));
+ expStrMap.put("longConstant markWord::locked_value", List.of(
+ "longConstant markWord::locked_value"));
+ expStrMap.put("longConstant markWord::lock_bits", List.of(
+ "longConstant markWord::lock_bits"));
expStrMap.put("longConstant jtreg::test", List.of(
"longConstant jtreg::test 6"));
@@ -93,12 +93,12 @@
// Expected output snippet is of the form (on x64-64):
// ...
// longConstant VM_Version::CPU_SHA 17179869184
- // longConstant markOopDesc::biased_lock_bits 1
- // longConstant markOopDesc::age_shift 3
- // longConstant markOopDesc::hash_mask_in_place 549755813632
+ // longConstant markWord::biased_lock_bits 1
+ // longConstant markWord::age_shift 3
+ // longConstant markWord::hash_mask_in_place 549755813632
// ...
- checkLongValue("markOopDesc::hash_mask_in_place",
+ checkLongValue("markWord::hash_mask_in_place",
longConstantOutput,
Platform.is64bit() ? 549755813632L: 4294967168L);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/LingeredAppWithTrivialMain.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.apps.LingeredApp;
+
+/**
+ * This is a wrapper around LingeredApp.main to ensure we reliably get a
+ * compiled main nmethod in the stack trace on all platforms when using
+ * -Xcomp.
+ */
+public class LingeredAppWithTrivialMain extends LingeredApp {
+ public static void main(String args[]) {
+ LingeredApp.main(args);
+ }
+ }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/com/sun/crypto/provider/Cipher/ChaCha20/OutputSizeTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8224997
+ * @summary ChaCha20-Poly1305 TLS cipher suite decryption throws ShortBufferException
+ * @library /test/lib
+ * @build jdk.test.lib.Convert
+ * @run main OutputSizeTest
+ */
+
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.security.Key;
+import java.security.SecureRandom;
+import javax.crypto.Cipher;
+import javax.crypto.KeyGenerator;
+import javax.crypto.spec.ChaCha20ParameterSpec;
+import javax.crypto.spec.IvParameterSpec;
+
+public class OutputSizeTest {
+
+ private static final SecureRandom SR = new SecureRandom();
+
+ public static void main(String args[]) throws Exception {
+ testCC20GetOutSize();
+ testCC20P1305GetOutSize();
+ testMultiPartAEADDec();
+ }
+
+ private static void testCC20GetOutSize()
+ throws GeneralSecurityException {
+ boolean result = true;
+ KeyGenerator kg = KeyGenerator.getInstance("ChaCha20", "SunJCE");
+ kg.init(256);
+
+ // ChaCha20 encrypt
+ Cipher cc20 = Cipher.getInstance("ChaCha20", "SunJCE");
+ cc20.init(Cipher.ENCRYPT_MODE, kg.generateKey(),
+ new ChaCha20ParameterSpec(getRandBuf(12), 10));
+
+ testOutLen(cc20, 0, 0);
+ testOutLen(cc20, 5, 5);
+ testOutLen(cc20, 5120, 5120);
+ // perform an update, then test with a final block
+ byte[] input = new byte[5120];
+ SR.nextBytes(input);
+ cc20.update(input);
+ testOutLen(cc20, 1024, 1024);
+
+ // Decryption lengths should be calculated the same way as encryption
+ cc20.init(Cipher.DECRYPT_MODE, kg.generateKey(),
+ new ChaCha20ParameterSpec(getRandBuf(12), 10));
+ testOutLen(cc20, 0, 0);
+ testOutLen(cc20, 5, 5);
+ testOutLen(cc20, 5120, 5120);
+ // perform an update, then test with a final block
+ cc20.update(input);
+ testOutLen(cc20, 1024, 1024);
+ }
+
+ private static void testCC20P1305GetOutSize()
+ throws GeneralSecurityException {
+ KeyGenerator kg = KeyGenerator.getInstance("ChaCha20", "SunJCE");
+ kg.init(256);
+
+ // ChaCha20 encrypt
+ Cipher cc20 = Cipher.getInstance("ChaCha20-Poly1305", "SunJCE");
+ cc20.init(Cipher.ENCRYPT_MODE, kg.generateKey(),
+ new IvParameterSpec(getRandBuf(12)));
+
+ // Encryption lengths are calculated as the input length plus the tag
+ // length (16).
+ testOutLen(cc20, 0, 16);
+ testOutLen(cc20, 5, 21);
+ testOutLen(cc20, 5120, 5136);
+ // perform an update, then test with a final block
+ byte[] input = new byte[5120];
+ SR.nextBytes(input);
+ cc20.update(input);
+ testOutLen(cc20, 1024, 1040);
+
+ // Decryption lengths are handled differently for AEAD mode. The length
+ // should be zero for anything up to and including the first 16 bytes
+ // (since that's the tag). Anything above that should be the input
+ // length plus any unprocessed input (via update calls), minus the
+ // 16 byte tag.
+ cc20.init(Cipher.DECRYPT_MODE, kg.generateKey(),
+ new IvParameterSpec(getRandBuf(12)));
+ testOutLen(cc20, 0, 0);
+ testOutLen(cc20, 5, 0);
+ testOutLen(cc20, 16, 0);
+ testOutLen(cc20, 5120, 5104);
+ // Perform an update, then test with the length of a final chunk
+ // of data.
+ cc20.update(input);
+ testOutLen(cc20, 1024, 6128);
+ }
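
Reading the expectations above as arithmetic: on the encrypt side, getOutputSize(1024) is 1024 + 16 (tag) = 1040, presumably because ciphertext is emitted as update() runs; on the decrypt side everything is buffered until the tag can be verified, so after the update() of the 5120-byte block, getOutputSize(1024) is 5120 + 1024 - 16 = 6128.
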
+
+ private static void testMultiPartAEADDec() throws GeneralSecurityException {
+ KeyGenerator kg = KeyGenerator.getInstance("ChaCha20", "SunJCE");
+ kg.init(256);
+ Key key = kg.generateKey();
+ IvParameterSpec ivps = new IvParameterSpec(getRandBuf(12));
+
+ // Encrypt some data so we can test decryption.
+ byte[] pText = getRandBuf(2048);
+ ByteBuffer pTextBase = ByteBuffer.wrap(pText);
+
+ Cipher enc = Cipher.getInstance("ChaCha20-Poly1305", "SunJCE");
+ enc.init(Cipher.ENCRYPT_MODE, key, ivps);
+ ByteBuffer ctBuf = ByteBuffer.allocateDirect(
+ enc.getOutputSize(pText.length));
+ enc.doFinal(pTextBase, ctBuf);
+
+ // Create a new direct plain text ByteBuffer which will receive the
+ // decrypted data.
+ ByteBuffer ptBuf = ByteBuffer.allocateDirect(pText.length);
+
+ // Set the cipher text buffer limit to roughly half the data so we can
+ // do an update/final sequence.
+ ctBuf.position(0).limit(1024);
+
+ Cipher dec = Cipher.getInstance("ChaCha20-Poly1305", "SunJCE");
+ dec.init(Cipher.DECRYPT_MODE, key, ivps);
+ dec.update(ctBuf, ptBuf);
+ System.out.println("CTBuf: " + ctBuf);
+ System.out.println("PTBuf: " + ptBuf);
+ ctBuf.limit(ctBuf.capacity());
+ dec.doFinal(ctBuf, ptBuf);
+
+ ptBuf.flip();
+ pTextBase.flip();
+ System.out.println("PT Base:" + pTextBase);
+ System.out.println("PT Actual:" + ptBuf);
+
+ if (pTextBase.compareTo(ptBuf) != 0) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Plaintext mismatch: Original: ").
+ append(pTextBase.toString()).append("\nActual :").
+ append(ptBuf);
+ throw new RuntimeException(sb.toString());
+ }
+ }
+
+ private static void testOutLen(Cipher c, int inLen, int expOut) {
+ int actualOut = c.getOutputSize(inLen);
+ if (actualOut != expOut) {
+ throw new RuntimeException("Cipher " + c + ", in: " + inLen +
+ ", expOut: " + expOut + ", actual: " + actualOut);
+ }
+ }
+
+ private static byte[] getRandBuf(int len) {
+ byte[] buf = new byte[len];
+ SR.nextBytes(buf);
+ return buf;
+ }
+}
+
--- a/test/jdk/java/net/Authenticator/B4769350.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/Authenticator/B4769350.java Mon Aug 19 21:14:34 2019 -0400
@@ -23,7 +23,7 @@
/**
* @test
- * @bug 4769350 8017779
+ * @bug 4769350 8017779 8191169
* @modules jdk.httpserver
* @run main/othervm B4769350 server
* @run main/othervm B4769350 proxy
--- a/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPSetAuthenticatorTest.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPSetAuthenticatorTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,8 +115,8 @@
HttpAuthType mode)
throws IOException
{
- HttpTestAuthenticator authOne = new HttpTestAuthenticator("dublin", "foox");
- HttpTestAuthenticator authTwo = new HttpTestAuthenticator("dublin", "foox");
+ HttpTestAuthenticator authOne = new HttpTestAuthenticator("authOne", "dublin", "foox");
+ HttpTestAuthenticator authTwo = new HttpTestAuthenticator("authTwo", "dublin", "foox");
int expectedIncrement = scheme == HttpSchemeType.NONE
? 0 : EXPECTED_AUTH_CALLS_PER_TEST;
int count;
--- a/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTest.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -89,8 +89,10 @@
// count will be incremented every time getPasswordAuthentication()
// is called from the client side.
final AtomicInteger count = new AtomicInteger();
+ private final String name;
- public HttpTestAuthenticator(String realm, String username) {
+ public HttpTestAuthenticator(String name, String realm, String username) {
+ this.name = name;
this.realm = realm;
this.username = username;
}
@@ -98,7 +100,7 @@
@Override
protected PasswordAuthentication getPasswordAuthentication() {
if (skipCount.get() == null || skipCount.get().booleanValue() == false) {
- System.out.println("Authenticator called: " + count.incrementAndGet());
+ System.out.println("Authenticator " + name + " called: " + count.incrementAndGet());
}
return new PasswordAuthentication(getUserName(),
new char[] {'b','a','r'});
@@ -118,6 +120,11 @@
throw new SecurityException("User unknown: " + user);
}
+ @Override
+ public String toString() {
+ return super.toString() + "[name=\"" + name + "\"]";
+ }
+
public final String getUserName() {
return username;
}
@@ -128,7 +135,7 @@
}
public static final HttpTestAuthenticator AUTHENTICATOR;
static {
- AUTHENTICATOR = new HttpTestAuthenticator("dublin", "foox");
+ AUTHENTICATOR = new HttpTestAuthenticator("AUTHENTICATOR", "dublin", "foox");
Authenticator.setDefault(AUTHENTICATOR);
}
--- a/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTestServer.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/HttpURLConnection/SetAuthenticator/HTTPTestServer.java Mon Aug 19 21:14:34 2019 -0400
@@ -165,7 +165,7 @@
for (int i = 1; i <= max; i++) {
B bindable = createBindable();
SocketAddress address = getAddress(bindable);
- String key = address.toString();
+ String key = toString(address);
if (addresses.addIfAbsent(key)) {
System.out.println("Socket bound to: " + key
+ " after " + i + " attempt(s)");
@@ -188,6 +188,16 @@
+ "addresses used before: " + addresses);
}
+ private static String toString(SocketAddress address) {
+ // We don't rely on address.toString(): sometimes it can be
+ // "/127.0.0.1:port", sometimes it can be "localhost/127.0.0.1:port"
+ // Instead we compose our own string representation:
+ InetSocketAddress candidate = (InetSocketAddress) address;
+ String hostAddr = candidate.getAddress().getHostAddress();
+ if (hostAddr.contains(":")) hostAddr = "[" + hostAddr + "]";
+ return hostAddr + ":" + candidate.getPort();
+ }
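
A quick illustration of the difference (values hypothetical; the exact toString() form depends on how the address was constructed):

InetSocketAddress a = new InetSocketAddress("localhost", 8080);
System.out.println(a.toString());  // may print "localhost/127.0.0.1:8080" or "/127.0.0.1:8080"
System.out.println(toString(a));   // always "127.0.0.1:8080"; IPv6 literals get bracketed, e.g. "[0:0:0:0:0:0:0:1]:8080"
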
+
protected abstract B createBindable() throws IOException;
protected abstract SocketAddress getAddress(B bindable);
--- a/test/jdk/java/net/SocketOption/TcpKeepAliveTest.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/SocketOption/TcpKeepAliveTest.java Mon Aug 19 21:14:34 2019 -0400
@@ -31,6 +31,7 @@
import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.MulticastSocket;
import java.net.ServerSocket;
import java.net.Socket;
@@ -43,9 +44,9 @@
private static final int DEFAULT_KEEP_ALIVE_INTVL = 53;
public static void main(String args[]) throws IOException {
-
- try (ServerSocket ss = new ServerSocket(0);
- Socket s = new Socket(InetAddress.getLoopbackAddress(), ss.getLocalPort());
+ var loopback = InetAddress.getLoopbackAddress();
+ try (ServerSocket ss = boundServer(loopback);
+ Socket s = new Socket(loopback, ss.getLocalPort());
DatagramSocket ds = new DatagramSocket(0);
MulticastSocket mc = new MulticastSocket(0)) {
if (ss.supportedOptions().contains(ExtendedSocketOptions.TCP_KEEPIDLE)) {
@@ -110,4 +111,11 @@
}
}
}
+
+ private static ServerSocket boundServer(InetAddress address) throws IOException {
+ var socketAddress = new InetSocketAddress(address, 0);
+ var server = new ServerSocket();
+ server.bind(socketAddress);
+ return server;
+ }
}
--- a/test/jdk/java/net/URLConnection/SetIfModifiedSince.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/java/net/URLConnection/SetIfModifiedSince.java Mon Aug 19 21:14:34 2019 -0400
@@ -36,6 +36,7 @@
public class SetIfModifiedSince {
static volatile boolean successfulHeaderCheck = false;
+ static final String MARKER = "A-test-name";
static class XServer extends Thread {
ServerSocket srv;
@@ -52,28 +53,49 @@
}
public void run() {
- try {
+ boolean foundMarker = false;
+ while (!foundMarker) {
String x;
- s = srv.accept ();
- is = s.getInputStream ();
- BufferedReader r = new BufferedReader(new InputStreamReader(is));
- os = s.getOutputStream ();
- while ((x=r.readLine()) != null) {
- String header = "If-Modified-Since: ";
- if (x.startsWith(header)) {
- if (x.charAt(header.length()) == '?') {
- s.close ();
- srv.close (); // or else the HTTPURLConnection will retry
- throw new RuntimeException
- ("Invalid HTTP date specification");
+ try {
+ s = srv.accept();
+ System.out.println("Server: accepting connection from: " + s);
+ is = s.getInputStream ();
+ } catch (IOException io) {
+ System.err.println("Server: Failed to accept connection: " + io);
+ io.printStackTrace();
+ try { srv.close(); } catch (IOException ioc) { }
+ break;
+ }
+ try {
+ BufferedReader r = new BufferedReader(new InputStreamReader(is));
+ os = s.getOutputStream ();
+ boolean foundHeader;
+ while ((x=r.readLine()) != null) {
+ String testname = MARKER + ": ";
+ String header = "If-Modified-Since: ";
+ if (x.startsWith(header)) {
+ foundHeader = true;
+ System.out.println("Server: found header: " + x);
+ if (x.charAt(header.length()) == '?') {
+ s.close ();
+ srv.close (); // or else the HTTPURLConnection will retry
+ throw new RuntimeException
+ ("Invalid HTTP date specification");
+ }
+ if (foundMarker) break;
+ } else if (x.startsWith(testname)) {
+ foundMarker = true;
+ System.out.println("Server: found marker: " + x);
}
- break;
}
- }
- successfulHeaderCheck = true;
- s.close ();
- srv.close (); // or else the HTTPURLConnection will retry
- } catch (IOException e) {}
+ successfulHeaderCheck = true;
+ s.close ();
+ // Only close the server if the connection came from this test.
+ if (foundMarker) {
+ srv.close (); // or else the HTTPURLConnection will retry
+ }
+ } catch (IOException e) {}
+ }
}
}
@@ -94,6 +116,7 @@
.path("/index.html")
.toURLUnchecked();
URLConnection urlc = url.openConnection(Proxy.NO_PROXY);
+ urlc.setRequestProperty(MARKER, "SetIfModifiedSince");
urlc.setIfModifiedSince (10000000);
InputStream is = urlc.getInputStream ();
int i = 0, c;
--- a/test/jdk/sun/net/www/http/HttpClient/GetProxyPort.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/HttpClient/GetProxyPort.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,15 +27,25 @@
* @summary REGRESSION: Sun implementation for HttpURLConnection could throw NPE
* @modules java.base/sun.net
* java.base/sun.net.www.http
+ * @library /test/lib
*/
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.URL;
import sun.net.www.http.HttpClient;
+import jdk.test.lib.net.URIBuilder;
public class GetProxyPort {
public static void main(String[] args) throws Exception {
- ServerSocket ss = new ServerSocket(0);
- URL myURL = new URL("http://localhost:" + ss.getLocalPort());
+ ServerSocket ss = new ServerSocket();
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ ss.bind(new InetSocketAddress(loopback, 0));
+ URL myURL = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(ss.getLocalPort())
+ .toURL();
HttpClient httpC = new HttpClient(myURL, null, -1);
int port = httpC.getProxyPortUsed();
}
--- a/test/jdk/sun/net/www/http/HttpClient/ImplicitFileName.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/HttpClient/ImplicitFileName.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,19 +27,29 @@
* @summary Make sure that implicit filenames will be returned as "/"
* @modules java.base/sun.net
* java.base/sun.net.www.http
+ * @library /test/lib
*/
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.URL;
import java.net.ServerSocket;
import sun.net.www.http.HttpClient;
+import jdk.test.lib.net.URIBuilder;
public class ImplicitFileName {
public static void main(String[] args) throws Exception {
- ServerSocket ss = new ServerSocket(0);
+ ServerSocket ss = new ServerSocket();
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ ss.bind(new InetSocketAddress(loopback, 0));
- URL url = new URL("http://localhost:" + ss.getLocalPort());
+ URL url = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(ss.getLocalPort())
+ .toURL();
HttpClient c = HttpClient.New(url);
--- a/test/jdk/sun/net/www/http/HttpClient/IsAvailable.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/HttpClient/IsAvailable.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,43 +28,55 @@
* has been closed
* @modules java.base/sun.net
* java.base/sun.net.www.http:+open
+ * @library /test/lib
*/
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.URL;
import java.net.ServerSocket;
import sun.net.www.http.HttpClient;
import java.security.*;
import java.lang.reflect.Method;
+import jdk.test.lib.net.URIBuilder;
public class IsAvailable {
public static void main(String[] args) throws Exception {
int readTimeout = 20;
- ServerSocket ss = new ServerSocket(0);
+ ServerSocket ss = new ServerSocket();
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ ss.bind(new InetSocketAddress(loopback, 0));
+
+ try (ServerSocket toclose = ss) {
- URL url1 = new URL("http://localhost:" + ss.getLocalPort());
- HttpClient c1 = HttpClient.New(url1);
+ URL url1 = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(ss.getLocalPort())
+ .toURL();
- Method available = HttpClient.class.
- getDeclaredMethod("available", null);
- available.setAccessible(true);
+ HttpClient c1 = HttpClient.New(url1);
- c1.setReadTimeout(readTimeout);
- boolean a = (boolean) available.invoke(c1);
- if (!a) {
- throw new RuntimeException("connection should be available");
- }
- if (c1.getReadTimeout() != readTimeout) {
- throw new RuntimeException("read timeout has been altered");
- }
+ Method available = HttpClient.class.
+ getDeclaredMethod("available", null);
+ available.setAccessible(true);
- c1.closeServer();
+ c1.setReadTimeout(readTimeout);
+ boolean a = (boolean) available.invoke(c1);
+ if (!a) {
+ throw new RuntimeException("connection should be available");
+ }
+ if (c1.getReadTimeout() != readTimeout) {
+ throw new RuntimeException("read timeout has been altered");
+ }
- a = (boolean) available.invoke(c1);
- if (a) {
- throw new RuntimeException("connection shouldn't be available");
+ c1.closeServer();
+
+ a = (boolean) available.invoke(c1);
+ if (a) {
+ throw new RuntimeException("connection shouldn't be available");
+ }
}
-
- ss.close();
}
}
--- a/test/jdk/sun/net/www/http/HttpClient/IsKeepingAlive.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/HttpClient/IsKeepingAlive.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,32 +28,42 @@
* doPrivileged() call at appropriate places.
* @modules java.base/sun.net
* java.base/sun.net.www.http
+ * @library /test/lib
* @run main/othervm/policy=IsKeepingAlive.policy IsKeepingAlive
*/
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.URL;
import java.net.ServerSocket;
import sun.net.www.http.HttpClient;
import java.security.*;
+import jdk.test.lib.net.URIBuilder;
public class IsKeepingAlive {
public static void main(String[] args) throws Exception {
- ServerSocket ss = new ServerSocket(0);
+ ServerSocket ss = new ServerSocket();
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ ss.bind(new InetSocketAddress(loopback, 0));
- SecurityManager security = System.getSecurityManager();
- if (security == null) {
- security = new SecurityManager();
- System.setSecurityManager(security);
- }
+ try (ServerSocket toClose = ss) {
+ SecurityManager security = System.getSecurityManager();
+ if (security == null) {
+ security = new SecurityManager();
+ System.setSecurityManager(security);
+ }
- URL url1 = new URL("http://localhost:" + ss.getLocalPort());
-
- HttpClient c1 = HttpClient.New(url1);
+ URL url1 = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(ss.getLocalPort())
+ .toURL();
- boolean keepAlive = c1.isKeepingAlive();
+ HttpClient c1 = HttpClient.New(url1);
- ss.close();
+ boolean keepAlive = c1.isKeepingAlive();
+ }
}
}
--- a/test/jdk/sun/net/www/http/HttpClient/OpenServer.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/HttpClient/OpenServer.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,22 +27,31 @@
* @summary Make sure HttpClient has
* doPrivileged() calls at appropriate places.
* @modules java.base/sun.net.www.http
+ * @library /test/lib
* @run main/othervm/policy=OpenServer.policy OpenServer
*/
import java.net.*;
import sun.net.www.http.HttpClient;
+import jdk.test.lib.net.URIBuilder;
public class OpenServer {
OpenServer() throws Exception {
- ServerSocket ss = new ServerSocket(0);
+ ServerSocket ss = new ServerSocket();
+ InetAddress loopback = InetAddress.getLoopbackAddress();
+ ss.bind(new InetSocketAddress(loopback, 0));
- URL myURL = new URL("http://localhost:" + ss.getLocalPort());
- HttpClient httpC = new HttpClient(myURL, null, -1);
+ try (ServerSocket toClose = ss) {
+ URL myURL = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(ss.getLocalPort())
+ .toURL();
- ss.close();
+ HttpClient httpC = new HttpClient(myURL, null, -1);
+ }
}
public static void main(String [] args) throws Exception {
--- a/test/jdk/sun/net/www/http/KeepAliveStream/KeepAliveStreamCloseWithWrongContentLength.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/http/KeepAliveStream/KeepAliveStreamCloseWithWrongContentLength.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,13 @@
* @test
* @bug 4533243
* @summary Closing a keep alive stream gives NullPointerException
+ * @library /test/lib
* @run main/othervm/timeout=30 KeepAliveStreamCloseWithWrongContentLength
*/
import java.net.*;
import java.io.*;
+import jdk.test.lib.net.URIBuilder;
public class KeepAliveStreamCloseWithWrongContentLength {
@@ -75,13 +77,20 @@
}
public static void main (String[] args) throws Exception {
- ServerSocket serversocket = new ServerSocket (0);
+ final InetAddress loopback = InetAddress.getLoopbackAddress();
+ final ServerSocket serversocket = new ServerSocket();
+ serversocket.bind(new InetSocketAddress(loopback, 0));
+
try {
int port = serversocket.getLocalPort ();
XServer server = new XServer (serversocket);
server.start ();
- URL url = new URL ("http://localhost:"+port);
- HttpURLConnection urlc = (HttpURLConnection)url.openConnection ();
+ URL url = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(port)
+ .toURL();
+ HttpURLConnection urlc = (HttpURLConnection)url.openConnection(Proxy.NO_PROXY);
InputStream is = urlc.getInputStream ();
int c = 0;
while (c != -1) {
@@ -98,7 +107,7 @@
} catch (NullPointerException e) {
throw new RuntimeException (e);
} finally {
- if (serversocket != null) serversocket.close();
+ serversocket.close();
}
}
}
--- a/test/jdk/sun/net/www/protocol/http/StreamingOutputStream.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/http/StreamingOutputStream.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
InetSocketAddress address = httpServer.getAddress();
URL url = new URL("http://" + address.getHostName() + ":" + address.getPort() + "/test/");
- HttpURLConnection uc = (HttpURLConnection)url.openConnection();
+ HttpURLConnection uc = (HttpURLConnection)url.openConnection(Proxy.NO_PROXY);
uc.setDoOutput(true);
uc.setFixedLengthStreamingMode(1);
@@ -87,7 +87,18 @@
* Http Server
*/
void startHttpServer() throws IOException {
- httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(0), 0);
+ InetAddress address = InetAddress.getLocalHost();
+ if (!InetAddress.getByName(address.getHostName()).equals(address)) {
+ // if this happens then we should possibly change the client
+ // side to use the address literal in its URL instead of
+ // the host name.
+ throw new IOException(address.getHostName()
+ + " resolves to "
+ + InetAddress.getByName(address.getHostName())
+ + " not to "
+ + address + ": check host configuration.");
+ }
+ httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(address, 0), 0);
HttpContext ctx = httpServer.createContext("/test/", new MyHandler());
httpServer.start();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/net/www/protocol/http/TestTransparentNTLM.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8225425
+ * @summary Verifies that transparent NTLM (on Windows) is not used by default,
+ * and is used only when the relevant property is set.
+ * @requires os.family == "windows"
+ * @library /test/lib
+ * @run testng/othervm
+ * -Dtest.auth.succeed=false
+ * TestTransparentNTLM
+ * @run testng/othervm
+ * -Djdk.http.ntlm.transparentAuth=allHosts
+ * -Dtest.auth.succeed=true
+ * TestTransparentNTLM
+ * @run testng/othervm
+ * -Djdk.http.ntlm.transparentAuth=blahblah
+ * -Dtest.auth.succeed=false
+ * TestTransparentNTLM
+ * @run testng/othervm
+ * -Djdk.http.ntlm.transparentAuth=trustedHosts
+ * -Dtest.auth.succeed=false
+ * TestTransparentNTLM
+ */
+
+// Run with `trustedHosts` to exercise the native code, nothing more.
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.URL;
+import jdk.test.lib.net.URIBuilder;
+import org.testng.annotations.AfterTest;
+import org.testng.annotations.BeforeTest;
+import org.testng.annotations.Test;
+import org.testng.SkipException;
+import static java.lang.System.out;
+import static java.net.Proxy.NO_PROXY;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.fail;
+
+public class TestTransparentNTLM {
+
+ boolean succeed; // true if authentication is expected to succeed
+ Server server;
+ URL url;
+
+ @Test
+ public void testNTLM() throws IOException {
+ out.println("connecting to url: " + url);
+ HttpURLConnection uc = (HttpURLConnection)url.openConnection(NO_PROXY);
+ int respCode = uc.getResponseCode();
+ out.println("received: " + respCode);
+
+ if (succeed) {
+ assertEquals(respCode, HttpURLConnection.HTTP_OK);
+ String body = new String(uc.getInputStream().readAllBytes(), UTF_8);
+ out.println("received body: " + body);
+ } else {
+ assertEquals(respCode, HttpURLConnection.HTTP_UNAUTHORIZED);
+ }
+ }
+
+ static class Server extends Thread implements Closeable {
+
+ static final InetAddress LOOPBACK = InetAddress.getLoopbackAddress();
+ final ServerSocket serverSocket;
+ final boolean expectAuthToSucceed;
+
+ Server(boolean expectAuthToSucceed) throws IOException {
+ super("TestTransparentNTLM-Server");
+ serverSocket = new ServerSocket();
+ serverSocket.bind(new InetSocketAddress(LOOPBACK, 0));
+ this.expectAuthToSucceed = expectAuthToSucceed;
+ }
+
+ int port() {
+ return serverSocket.getLocalPort();
+ }
+
+ static final String AUTH_REQUIRED =
+ "HTTP/1.1 401 Unauthorized\r\n" +
+ "Content-Length: 0\r\n" +
+ "Connection: close\r\n" +
+ "WWW-Authenticate: NTLM\r\n\r\n";
+
+ static final String AUTH_STAGE_TWO =
+ "HTTP/1.1 401 Unauthorized\r\n" +
+ "Content-Length: 0\r\n" +
+ "WWW-Authenticate: NTLM TlRMTVNTUAACAAAAAAAAACgAAAABggAAU3J2Tm9uY2UAAAAAAAAAAA==\r\n\r\n";
+
+ static final String AUTH_SUCCESSFUL =
+ "HTTP/1.1 200 OK\r\n" +
+ "Content-Length: 11\r\n\r\n" +
+ "Hello world";
+
+ @Override
+ public void run() {
+ try {
+ try (Socket s = serverSocket.accept()) {
+ out.println("Server accepted connection - 1");
+ readRequestHeaders(s.getInputStream());
+ s.getOutputStream().write(AUTH_REQUIRED.getBytes(UTF_8));
+ }
+
+ if (expectAuthToSucceed) {
+ // await the second follow up connection
+ try (Socket s = serverSocket.accept()) {
+ out.println("Server accepted connection - 2");
+ readRequestHeaders(s.getInputStream());
+ s.getOutputStream().write(AUTH_STAGE_TWO.getBytes(UTF_8));
+ readRequestHeaders(s.getInputStream());
+ s.getOutputStream().write(AUTH_SUCCESSFUL.getBytes(UTF_8));
+ }
+ }
+ } catch (IOException e) {
+ fail("Unexpected exception", e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ serverSocket.close();
+ }
+
+ static final byte[] REQUEST_END = new byte[] {'\r', '\n', '\r', '\n'};
+
+ // Read until the end of the HTTP request headers
+ static void readRequestHeaders(InputStream is) throws IOException {
+ int requestEndCount = 0, r;
+ while ((r = is.read()) != -1) {
+ if (r == REQUEST_END[requestEndCount]) {
+ requestEndCount++;
+ if (requestEndCount == 4) {
+ break;
+ }
+ } else {
+ requestEndCount = 0;
+ }
+ }
+ }
+ }
+
+ @BeforeTest
+ public void setup() throws Exception {
+ succeed = System.getProperty("test.auth.succeed").equals("true");
+ if (succeed)
+ out.println("Expect client to succeed, with 200 Ok");
+ else
+ out.println("Expect client to fail, with 401 Unauthorized");
+
+ server = new Server(succeed);
+ server.start();
+ url = URIBuilder.newBuilder()
+ .scheme("http")
+ .loopback()
+ .port(server.port())
+ .path("/xxyyzz")
+ .toURL();
+ }
+
+ @AfterTest
+ public void teardown() throws Exception {
+ server.close();
+ server.join();
+ }
+}
--- a/test/jdk/sun/net/www/protocol/http/UserAuth.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/http/UserAuth.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,19 @@
* Http Server
*/
void startHttpServer() throws IOException {
- httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(0), 0);
+ InetAddress address = InetAddress.getLocalHost();
+ if (!InetAddress.getByName(address.getHostName()).equals(address)) {
+ // if this happens then we should possibly change the client
+ // side to use the address literal in its URL instead of
+ // the host name.
+ throw new IOException(address.getHostName()
+ + " resolves to "
+ + InetAddress.getByName(address.getHostName())
+ + " not to "
+ + address + ": check host configuration.");
+ }
+
+ httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(address, 0), 0);
// create HttpServer context
HttpContext ctx = httpServer.createContext("/redirect/", new RedirectHandler());
--- a/test/jdk/sun/net/www/protocol/http/UserCookie.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/http/UserCookie.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,19 @@
* Http Server
*/
void startHttpServer() throws IOException {
- httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(0), 0);
+ InetAddress address = InetAddress.getLocalHost();
+ if (!InetAddress.getByName(address.getHostName()).equals(address)) {
+ // if this happens then we should possibly change the client
+ // side to use the address literal in its URL instead of
+ // the host name.
+ throw new IOException(address.getHostName()
+ + " resolves to "
+ + InetAddress.getByName(address.getHostName())
+ + " not to "
+ + address + ": check host configuration.");
+ }
+
+ httpServer = com.sun.net.httpserver.HttpServer.create(new InetSocketAddress(address, 0), 0);
// create HttpServer context
HttpContext ctx = httpServer.createContext("/test/", new MyHandler());
--- a/test/jdk/sun/net/www/protocol/https/ChunkedOutputStream.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/https/ChunkedOutputStream.java Mon Aug 19 21:14:34 2019 -0400
@@ -37,6 +37,7 @@
import java.io.*;
import java.net.*;
import javax.net.ssl.*;
+import java.util.concurrent.atomic.AtomicInteger;
public class ChunkedOutputStream implements HttpCallback {
/*
@@ -47,6 +48,7 @@
static String trustStoreFile = "truststore";
static String passwd = "passphrase";
static int count = 0;
+ static final AtomicInteger rogueCount = new AtomicInteger();
static final String str1 = "Helloworld1234567890abcdefghijklmnopqrstuvwxyz"+
"1234567890abcdefkjsdlkjflkjsldkfjlsdkjflkj"+
@@ -132,12 +134,23 @@
req.sendResponse(200, "OK");
req.orderlyClose();
break;
+ default:
+ req.sendResponse(404, "Not Found");
+ req.orderlyClose();
+ break;
}
} catch (IOException e) {
e.printStackTrace();
}
}
+ public boolean dropPlainTextConnections() {
+ System.out.println("Unrecognized SSL message, plaintext connection?");
+ System.out.println("TestHttpsServer receveived rogue connection: ignoring it.");
+ rogueCount.incrementAndGet();
+ return true;
+ }
+
static void readAndCompare(InputStream is, String cmp) throws IOException {
int c;
byte buf[] = new byte[1024];
@@ -153,6 +166,26 @@
}
}
+ /* basic smoke test: verify that the server drops plain text connections */
+ static void testPlainText(String authority) throws Exception {
+ URL url = new URL("http://" + authority + "/Donauschiffsgesellschaftskapitaenskajuete");
+ System.out.println("client opening connection to: " + url);
+ HttpURLConnection urlc = (HttpURLConnection)url.openConnection(Proxy.NO_PROXY);
+ int rogue = rogueCount.get();
+ try {
+ int code = urlc.getResponseCode();
+ System.out.println("Unexpected response: " + code);
+ throw new AssertionError("Unexpected response: " + code);
+ } catch (SocketException x) {
+ // we expect that the server will drop the connection and
+ // close the accepted socket, so we should get a SocketException
+ // on the client side, and confirm that this::dropPlainTextConnections
+ // has been called.
+ if (rogueCount.get() == rogue) throw x;
+ System.out.println("Got expected exception: " + x);
+ }
+ }
+
/* basic chunked test (runs twice) */
static void test1(String u) throws Exception {
@@ -303,6 +336,7 @@
server = new TestHttpsServer(
new ChunkedOutputStream(), 1, 10, loopback, 0);
System.out.println("Server started: listening on: " + server.getAuthority());
+ testPlainText(server.getAuthority());
// the test server doesn't support keep-alive yet
// test1("http://" + server.getAuthority() + "/d0");
test1("https://" + server.getAuthority() + "/d01");
--- a/test/jdk/sun/net/www/protocol/https/HttpCallback.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/https/HttpCallback.java Mon Aug 19 21:14:34 2019 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,4 +36,15 @@
* client and used to send the response
*/
void request (HttpTransaction msg);
+
+ /**
+ * Tells whether the server should simply close the
+ * connection and ignore the request when the first
+ * byte received by the server suggests a plain
+ * text connection rather than a TLS handshake.
+ * @return true if the request should be ignored.
+ **/
+ default boolean dropPlainTextConnections() {
+ return false;
+ }
}
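Because dropPlainTextConnections() is a default method, existing HttpCallback implementations keep compiling unchanged and only tests that care about rogue plain text clients need to override it. A hedged sketch of such an override, assuming it compiles alongside the test-library HttpCallback and HttpTransaction types; the class name is illustrative:

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative callback that opts in to dropping plain text connections.
    class DroppingCallback implements HttpCallback {
        static final AtomicInteger dropped = new AtomicInteger();

        @Override
        public void request(HttpTransaction msg) {
            // normal HTTPS request handling would go here
        }

        @Override
        public boolean dropPlainTextConnections() {
            dropped.incrementAndGet();  // lets the test assert that a rogue connection was seen
            return true;                // tells TestHttpsServer to silently close the socket
        }
    }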
--- a/test/jdk/sun/net/www/protocol/https/TestHttpsServer.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/jdk/sun/net/www/protocol/https/TestHttpsServer.java Mon Aug 19 21:14:34 2019 -0400
@@ -316,6 +316,7 @@
HttpCallback cb;
HandshakeStatus currentHSStatus;
boolean initialHSComplete;
+ boolean handshakeStarted;
/*
* All inbound data goes through this buffer.
*
@@ -364,6 +365,25 @@
case NEED_UNWRAP:
int bytes = schan.read(inNetBB);
+ if (!handshakeStarted && bytes > 0) {
+ handshakeStarted = true;
+ int byte0 = inNetBB.get(0);
+ if (byte0 != 0x16) {
+ // first byte of a TLS connection is supposed to be
+ // 0x16. If it is not, this may be a plain text connection.
+ //
+ // Sometimes a rogue client may try to open a plain
+ // text connection to our server. Calling this method
+ // gives the test logic a chance to ignore such
+ // rogue connections.
+ //
+ if (cb.dropPlainTextConnections()) {
+ try { schan.close(); } catch (IOException x) { /* ignore close failure */ }
+ return;
+ }
+ // else sslEng.unwrap will throw later on...
+ }
+ }
needIO:
while (currentHSStatus == HandshakeStatus.NEED_UNWRAP) {
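The heuristic relies on the TLS record layout: every record starts with a one-byte content type, and a ClientHello arrives in a handshake record whose content type is 22 (0x16), so any other first byte almost certainly means the peer is not speaking TLS. A minimal blocking-socket sketch of the same first-byte check, independent of TestHttpsServer and using illustrative names:

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.ServerSocket;
    import java.net.Socket;

    // Illustrative sketch: drop connections whose first byte is not a TLS handshake record.
    public class FirstByteFilter {
        public static void main(String[] args) throws IOException {
            try (ServerSocket ss = new ServerSocket(0)) {
                System.out.println("listening on port " + ss.getLocalPort());
                try (Socket s = ss.accept()) {
                    InputStream in = s.getInputStream();
                    int first = in.read();
                    if (first != 0x16) {
                        // Not a TLS handshake record: likely a plain text (e.g. HTTP) client.
                        System.out.println("dropping non-TLS connection, first byte = " + first);
                        return;
                    }
                    // A real server would now hand the bytes to SSLEngine processing.
                    System.out.println("first byte is 0x16: looks like a TLS ClientHello");
                }
            }
        }
    }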
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/security/tools/jarsigner/SectionsOnly.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8229775
+ * @summary Incorrect warning when jar was signed with -sectionsonly
+ * @library /test/lib
+ */
+
+import jdk.test.lib.SecurityTools;
+import jdk.test.lib.util.JarUtils;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+public class SectionsOnly {
+ public static void main(String[] args) throws Exception {
+ String common = "-storepass changeit -keypass changeit -keystore ks ";
+ SecurityTools.keytool(common
+ + "-keyalg rsa -genkeypair -alias me -dname CN=Me");
+ JarUtils.createJarFile(Path.of("so.jar"), Path.of("."),
+ Files.write(Path.of("so.txt"), new byte[0]));
+ SecurityTools.jarsigner(common + "-sectionsonly so.jar me");
+ SecurityTools.jarsigner(common + "-verify -verbose so.jar")
+ .shouldNotContain("Unparsable signature-related file")
+ .shouldHaveExitValue(0);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/java/lang/StringSubstring.java Mon Aug 19 21:14:34 2019 -0400
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.bench.java.lang;
+
+import org.openjdk.jmh.annotations.*;
+import java.util.concurrent.TimeUnit;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+public class StringSubstring {
+
+ public String s = new String("An arbitrary string that happened to be of length 52");
+
+ @Benchmark
+ public String from26toEnd0() {
+ return s.substring(26);
+ }
+
+ @Benchmark
+ public String from26toEnd1() {
+ return s.substring(26, s.length());
+ }
+}
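The two benchmark methods take the same suffix, once with the single-argument overload and once with an explicit end index of s.length(), so any measured difference comes from how the two overloads reach the same copy rather than from different results. A hedged sketch of driving the benchmark through the standard JMH runner API (in the JDK tree the usual route is the micro benchmark make target; the options below are illustrative):

    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    // Illustrative sketch: run the StringSubstring microbenchmark programmatically.
    public class RunStringSubstring {
        public static void main(String[] args) throws Exception {
            Options opts = new OptionsBuilder()
                    .include(StringSubstring.class.getSimpleName())  // match the benchmark class by name
                    .forks(1)                                        // keep the run short for a quick check
                    .build();
            new Runner(opts).run();
        }
    }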
--- a/test/micro/org/openjdk/bench/javax/crypto/full/AESBench.java Mon Aug 19 20:31:10 2019 -0400
+++ b/test/micro/org/openjdk/bench/javax/crypto/full/AESBench.java Mon Aug 19 21:14:34 2019 -0400
@@ -43,7 +43,7 @@
@Param({"AES/ECB/NoPadding", "AES/ECB/PKCS5Padding", "AES/CBC/NoPadding", "AES/CBC/PKCS5Padding"})
private String algorithm;
- @Param({"128"})
+ @Param({"128", "192", "256"})
private int keyLength;
@Param({"" + 16 * 1024})