8229258: Rework markOop and markOopDesc into a simpler mark word value carrier
Reviewed-by: rkennke, coleenp, kbarrett, dcubed
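NOTE: the old markOop was a pointer type (markOopDesc*) whose bit pattern was reinterpreted as the object header; markWord is a plain value carrier around uintptr_t. A minimal sketch of the new shape, assuming the declarations in runtime/markWord.hpp (the real class has many more accessors; the names below are the ones this patch exercises):

    class markWord {
     private:
      uintptr_t _value;                        // raw header bits, held by value
     public:
      explicit markWord(uintptr_t value) : _value(value) {}
      uintptr_t value() const { return _value; }

      // Low-order lock bits, unchanged from markOopDesc:
      static const uintptr_t unlocked_value     = 1;  // ..01: neutral, unlocked
      static const uintptr_t monitor_value     = 2;  // ..10: inflated monitor
      static const uintptr_t marked_value      = 3;  // ..11: GC mark / forwardee
      static const uintptr_t lock_mask_in_place = 3;

      static markWord prototype();    // neutral header for new objects
      static markWord unused_mark();  // non-zero placeholder, lock bits set
    };

Since the constants stay plain integers, most assembler changes are a pure rename; only sites that used to cast the markOop pointer itself now go through value(), e.g. markWord::prototype().value() in the template tables below.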
--- a/src/hotspot/cpu/aarch64/aarch64.ad Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Tue Aug 06 10:48:21 2019 +0200
@@ -1771,7 +1771,7 @@
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
-
+
int bangsize = C->bang_size_in_bytes();
if (C->need_stack_bang(bangsize) && UseStackBanging)
__ generate_stack_overflow_check(bangsize);
@@ -3508,7 +3508,7 @@
assert_different_registers(oop, box, tmp, disp_hdr);
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
__ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
if (UseBiasedLocking && !UseOptoBiasInlining) {
@@ -3516,17 +3516,17 @@
}
// Check for existing monitor
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
-
- // Set tmp to be (markOop of object | UNLOCK_VALUE).
- __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
+
+ // Set tmp to be (markWord of object | UNLOCK_VALUE).
+ __ orr(tmp, disp_hdr, markWord::unlocked_value);
// Initialize the box. (Must happen before we update the object mark!)
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- // Compare object markOop with an unlocked value (tmp) and if
- // equal exchange the stack address of our box with object markOop.
- // On failure disp_hdr contains the possibly locked markOop.
+ // Compare object markWord with an unlocked value (tmp) and if
+ // equal exchange the stack address of our box with object markWord.
+ // On failure disp_hdr contains the possibly locked markWord.
__ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, disp_hdr);
__ br(Assembler::EQ, cont);
@@ -3540,10 +3540,10 @@
// We did not see an unlocked object so try the fast recursive case.
// Check if the owner is self by comparing the value in the
- // markOop of object (disp_hdr) with the stack pointer.
+ // markWord of object (disp_hdr) with the stack pointer.
__ mov(rscratch1, sp);
__ sub(disp_hdr, disp_hdr, rscratch1);
- __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
+ __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
// If the condition is true we branch to cont and hence we can store 0 as the
// displaced header in the box, which indicates that it is a recursive lock.
__ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
@@ -3558,15 +3558,15 @@
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
- __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
+ __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
__ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, noreg); // Sets flags for result
// Store a non-null value into the box to avoid looking like a re-entrant
// lock. The fast-path monitor unlock code checks for
- // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
+ // markWord::monitor_value so use markWord::unused_mark which has the
// relevant bit set, and also matches ObjectSynchronizer::slow_enter.
- __ mov(tmp, (address)markOopDesc::unused_mark());
+ __ mov(tmp, (address)markWord::unused_mark().value());
__ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
__ bind(cont);
@@ -3598,10 +3598,10 @@
// Handle existing monitor.
__ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
- __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+ __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
// Check if it is still a light weight lock, this is true if we
- // see the stack address of the basicLock in the markOop of the
+ // see the stack address of the basicLock in the markWord of the
// object.
__ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
@@ -3612,7 +3612,7 @@
// Handle existing monitor.
__ bind(object_has_monitor);
- __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
+ __ add(tmp, tmp, -markWord::monitor_value); // monitor
__ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
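NOTE: the fast-lock sequence above is the assembler form of the lightweight locking protocol. Roughly, in C++ (a sketch only; cas_mark is a hypothetical stand-in for the cmpxchg on the header word, and the real slow paths live in ObjectSynchronizer):

    // obj: object being locked; box: the on-stack BasicLock.
    markWord mark = obj->mark();                        // ldr disp_hdr, [oop]
    if (mark.value() & markWord::monitor_value) {
      // Inflated: CAS ObjectMonitor::_owner from NULL to the current thread,
      // then store a non-zero value (markWord::unused_mark()) into the box so
      // the unlock path does not take it for a recursive stack lock.
    } else {
      markWord unlocked(mark.value() | markWord::unlocked_value);
      box->set_displaced_header(unlocked);              // str tmp, [box]
      if (!cas_mark(obj, /*expected*/ unlocked, /*new*/ markWord((uintptr_t)box))) {
        // CAS failed: maybe a recursive lock. The header must then be a stack
        // address inside our own page-aligned frame with clear lock bits:
        //   (mark.value() - sp) & (~(page_size-1) | markWord::lock_mask_in_place) == 0
        // and if so we store 0 into the box to record the recursion.
      }
    }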
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -82,7 +82,7 @@
// Load object header
ldr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orr(hdr, hdr, markOopDesc::unlocked_value);
+ orr(hdr, hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
str(hdr, Address(disp_hdr, 0));
// test if object header is still the same (i.e. unlocked), and if so, store the
@@ -176,7 +176,7 @@
ldr(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t
- mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
+ mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
}
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -242,9 +242,9 @@
Label done;
__ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ eon(tmp, tmp, zr);
- __ ands(zr, tmp, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp, markWord::lock_mask_in_place);
__ br(Assembler::NE, done);
- __ orr(tmp, tmp, markOopDesc::marked_value);
+ __ orr(tmp, tmp, markWord::marked_value);
__ eon(dst, tmp, zr);
__ bind(done);
@@ -548,11 +548,11 @@
Label slow_path;
__ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
__ eon(tmp1, tmp1, zr);
- __ ands(zr, tmp1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, tmp1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(tmp1, tmp1, markOopDesc::marked_value);
+ __ orr(tmp1, tmp1, markWord::marked_value);
__ eon(res, tmp1, zr);
__ b(*stub->continuation());
@@ -665,11 +665,11 @@
Label slow_path;
__ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ eon(rscratch1, rscratch1, zr);
- __ ands(zr, rscratch1, markOopDesc::lock_mask_in_place);
+ __ ands(zr, rscratch1, markWord::lock_mask_in_place);
__ br(Assembler::NE, slow_path);
// Decode forwarded object.
- __ orr(rscratch1, rscratch1, markOopDesc::marked_value);
+ __ orr(rscratch1, rscratch1, markWord::marked_value);
__ eon(r0, rscratch1, zr);
__ ret(lr);
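NOTE: Shenandoah encodes the forwarding pointer in the mark word with both low bits set (markWord::marked_value == 3). The eon/ands/orr/eon sequence above decodes it by operating on the inverted value; in C++ terms (a sketch, assuming the tag occupies exactly the lock_mask bits):

    uintptr_t m   = obj->mark().value();
    uintptr_t inv = ~m;                                  // eon tmp, tmp, zr
    if ((inv & markWord::lock_mask_in_place) != 0) {
      // at least one low bit was clear: not forwarded, use obj as-is
    } else {
      inv |= markWord::marked_value;                     // orr tmp, tmp, 3
      oop fwd = (oop)(~inv);                             // eon dst, tmp, zr
      // fwd is m with both tag bits cleared: the forwardee address
    }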
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -472,7 +472,7 @@
counters = BiasedLocking::counters();
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
Address saved_mark_addr(lock_reg, 0);
@@ -489,15 +489,15 @@
null_check_offset = offset();
ldr(swap_reg, mark_addr);
}
- andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::NE, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
load_prototype_header(tmp_reg, obj_reg);
orr(tmp_reg, tmp_reg, rthread);
eor(tmp_reg, swap_reg, tmp_reg);
- andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+ andr(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
Label around;
cbnz(tmp_reg, around);
@@ -520,7 +520,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::biased_lock_mask_in_place);
cbnz(rscratch1, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -532,7 +532,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
+ andr(rscratch1, tmp_reg, markWord::epoch_mask_in_place);
cbnz(rscratch1, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -543,7 +543,7 @@
// don't accidentally blow away another thread's valid bias.
{
Label here;
- mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ mov(rscratch1, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
andr(swap_reg, swap_reg, rscratch1);
orr(tmp_reg, swap_reg, rthread);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
@@ -628,8 +628,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmp(temp_reg, (u1)markOopDesc::biased_lock_pattern);
+ andr(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
+ cmp(temp_reg, (u1)markWord::biased_lock_pattern);
br(Assembler::EQ, done);
}
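NOTE: every biased-locking hunk in this patch leans on the same header layout, which the renamed constants still describe. From the markWord header comments, the 64-bit layout is roughly:

    // unbiased: [ unused:25 | hash:31 | unused_gap:1 | age:4 | biased_lock:1 | lock:2 ]
    // biased:   [ JavaThread*:54 | epoch:2 | unused_gap:1 | age:4 | biased_lock:1 | lock:2 ]
    //
    // Hence the assert above: age_shift == lock_bits + biased_lock_bits (3 == 2 + 1),
    // biased_lock_pattern == 0b101 (biased_lock bit set over lock bits 01), and
    // biased_lock_mask_in_place == 0b111 covering all three low bits.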
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3615,7 +3615,7 @@
if (UseBiasedLocking) {
__ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
} else {
- __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
+ __ mov(rscratch1, (intptr_t)markWord::prototype().value());
}
__ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ store_klass_gap(r0, zr); // zero klass gap for compressed oops
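NOTE: the template-table allocation paths install the header roughly like this in C++ (a sketch; after this patch oopDesc::set_mark takes a markWord by value):

    if (UseBiasedLocking) {
      obj->set_mark(klass->prototype_header());  // may already carry a bias pattern
    } else {
      obj->set_mark(markWord::prototype());      // neutral: no hash, unlocked
    }

Because prototype() now returns a value rather than a pointer, the assembler fetches the raw bits through .value(), the one change in these hunks that goes beyond the rename.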
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -92,7 +92,7 @@
if(UseBiasedLocking && !len->is_valid()) {
ldr(tmp, Address(klass, Klass::prototype_header_offset()));
} else {
- mov(tmp, (intptr_t)markOopDesc::prototype());
+ mov(tmp, (intptr_t)markWord::prototype().value());
}
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
@@ -219,7 +219,7 @@
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
str(obj, Address(disp_hdr, obj_offset));
- tst(hdr, markOopDesc::unlocked_value);
+ tst(hdr, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive locking
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -878,7 +878,7 @@
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
// Test if object is already locked
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(already_locked, eq);
// Save old object->mark() into BasicLock's displaced header
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1345,7 +1345,7 @@
}
#endif
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
// Biased locking
@@ -1367,8 +1367,8 @@
// On MP platform loads could return 'stale' values in some cases.
// That is acceptable since either CAS or slow case path is taken in the worst case.
- andr(tmp_reg, swap_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, swap_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(cas_label, ne);
@@ -1379,7 +1379,7 @@
orr(tmp_reg, tmp_reg, Rthread);
eor(tmp_reg, tmp_reg, swap_reg);
- bics(tmp_reg, tmp_reg, ((int) markOopDesc::age_mask_in_place));
+ bics(tmp_reg, tmp_reg, ((int) markWord::age_mask_in_place));
#ifndef PRODUCT
if (counters != NULL) {
@@ -1401,7 +1401,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- tst(tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
b(try_revoke_bias, ne);
// Biasing is still enabled for this data type. See whether the
@@ -1413,7 +1413,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- tst(tmp_reg, (uintx)markOopDesc::epoch_mask_in_place);
+ tst(tmp_reg, (uintx)markWord::epoch_mask_in_place);
b(try_rebias, ne);
// tmp_reg has the age, epoch and pattern bits cleared
@@ -1431,10 +1431,10 @@
// until the assembler can be made smarter, we need to make some assumptions about the values
// so we can optimize this:
- assert((markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
+ assert((markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
mov(swap_reg, AsmOperand(swap_reg, lsl, 23));
- mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markOop with thread bits cleared (for CAS)
+ mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markWord with thread bits cleared (for CAS)
orr(tmp_reg, swap_reg, Rthread); // new mark
@@ -1519,8 +1519,8 @@
// the bias bit would be clear.
ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
- cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+ andr(tmp_reg, tmp_reg, (uintx)markWord::biased_lock_mask_in_place);
+ cmp(tmp_reg, markWord::biased_lock_pattern);
b(done, eq);
}
@@ -1993,7 +1993,7 @@
// Invariant: Rmark loaded below does not contain biased lock pattern
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
- tst(Rmark, markOopDesc::unlocked_value);
+ tst(Rmark, markWord::unlocked_value);
b(fast_lock, ne);
// Check for recursive lock
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -861,16 +861,16 @@
__ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- assert(markOopDesc::unlocked_value == 1, "adjust this code");
- __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);
+ assert(markWord::unlocked_value == 1, "adjust this code");
+ __ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
if (UseBiasedLocking) {
- assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
- __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
+ assert(is_power_of_2(markWord::biased_lock_bit_in_place), "adjust this code");
+ __ tbnz(Rtemp, exact_log2(markWord::biased_lock_bit_in_place), slow_case);
}
- __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
- __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
+ __ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
+ __ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
__ bx(LR, ne);
__ bind(slow_case);
@@ -1172,7 +1172,7 @@
__ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
__ sub(disp_hdr, FP, lock_slot_fp_offset);
- __ tst(mark, markOopDesc::unlocked_value);
+ __ tst(mark, markWord::unlocked_value);
__ b(fast_lock, ne);
// Check for recursive lock
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -4045,7 +4045,7 @@
if (UseBiasedLocking) {
__ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
} else {
- __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
+ __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
}
// mark
__ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -110,12 +110,12 @@
}
// ... and mark it unlocked.
- ori(Rmark, Rmark, markOopDesc::unlocked_value);
+ ori(Rmark, Rmark, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
- // Compare object markOop with Rmark and if equal exchange Rscratch with object markOop.
+ // Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/Rscratch,
@@ -137,7 +137,7 @@
bind(cas_failed);
// We did not find an unlocked object so see if this is a recursive case.
sub(Rscratch, Rscratch, R1_SP);
- load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
and_(R0/*==0?*/, Rscratch, R0);
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
bne(CCR0, slow_int);
@@ -171,7 +171,7 @@
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/R0,
/*compare_value=*/Rbox,
@@ -215,7 +215,7 @@
if (UseBiasedLocking && !len->is_valid()) {
ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
} else {
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
std(t1, oopDesc::mark_offset_in_bytes(), obj);
store_klass(obj, klass);
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -881,7 +881,7 @@
} else {
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -903,17 +903,17 @@
assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -949,12 +949,12 @@
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, tmp);
// If condition is true we are done and hence we can store 0 in the displaced
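NOTE: the template comment above moved from obj->mark()->set_unlocked() to obj->mark().set_unlocked(): mark() now returns a value and the transform produces a new markWord rather than mutating anything in place. A sketch of the accessor this relies on (mirroring what the ori instruction computes):

    markWord set_unlocked() const {
      // copy of this header with the 'unlocked' bit (..01) forced on
      return markWord(value() | unlocked_value);
    }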
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -2078,7 +2078,7 @@
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
if (PrintBiasedLockingStatistics) {
@@ -2088,13 +2088,13 @@
stwx(temp_reg, temp2_reg);
}
- andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ andi(temp_reg, mark_reg, markWord::biased_lock_mask_in_place);
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
bne(cr_reg, cas_label);
load_klass(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, R16_thread, temp_reg);
xorr(temp_reg, mark_reg, temp_reg);
@@ -2125,7 +2125,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+ andi(temp2_reg, temp_reg, markWord::biased_lock_mask_in_place);
cmpwi(cr_reg, temp2_reg, 0);
bne(cr_reg, try_revoke_bias);
@@ -2139,10 +2139,10 @@
// otherwise the manipulations it performs on the mark word are
// illegal.
- int shift_amount = 64 - markOopDesc::epoch_shift;
+ int shift_amount = 64 - markWord::epoch_shift;
// rotate epoch bits to right (little) end and set other bits to 0
// [ big part | epoch | little part ] -> [ 0..0 | epoch ]
- rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
+ rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markWord::epoch_bits);
// branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
bne(CCR0, try_rebias);
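NOTE: on 64-bit PPC the epoch comparison is one rotate-and-mask: rldicl rotates the xor result so the epoch field lands at the low end and clears everything above epoch_bits. In portable C++ terms (sketch; x is mark ^ (prototype_header | thread)):

    uintptr_t epoch_diff =
        (x >> markWord::epoch_shift) & (((uintptr_t)1 << markWord::epoch_bits) - 1);
    if (epoch_diff != 0) {
      // epochs differ: the bias is stale, take the try_rebias path
    }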
@@ -2152,9 +2152,9 @@
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
- markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place));
+ andi(mark_reg, mark_reg, (markWord::biased_lock_mask_in_place |
+ markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place));
orr(temp_reg, R16_thread, mark_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2187,7 +2187,7 @@
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
load_klass(temp_reg, obj_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp2_reg, R16_thread, temp2_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, temp2_reg, temp_reg);
@@ -2224,7 +2224,7 @@
// normal locking code.
load_klass(temp_reg, obj_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
- andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+ andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
orr(temp_reg, temp_reg, temp2_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2236,7 +2236,7 @@
MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock());
- // reload markOop in mark_reg before continuing with lightweight locking
+ // reload markWord in mark_reg before continuing with lightweight locking
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
// Fall through to the normal CAS-based lock, because no matter what
@@ -2264,9 +2264,9 @@
// the bias bit would be clear.
ld(temp_reg, 0, mark_addr);
- andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
-
- cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+ andi(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
+
+ cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
beq(cr_reg, done);
}
@@ -2687,7 +2687,7 @@
load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
- andi_(R0, mark_word, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ andi_(R0, mark_word, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
bne(CCR0, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -2705,10 +2705,10 @@
}
tbegin_();
beq(CCR0, L_on_abort);
- ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
- andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- beq(flag, DONE_LABEL); // all done if unlocked
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
+ andi(R0, mark_word, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ beq(flag, DONE_LABEL); // all done if unlocked
if (UseRTMXendForLockBusy) {
tend_();
@@ -2744,9 +2744,9 @@
assert(UseRTMLocking, "why call this otherwise?");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
// Clean monitor_value bit to get valid pointer.
- int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
-
- // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
+ int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value;
+
+ // Store non-null, using boxReg instead of (intptr_t)markWord::unused_mark().
std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
const Register tmpReg = boxReg;
const Register owner_addr_Reg = mark_word;
@@ -2791,7 +2791,7 @@
// Restore owner_addr_Reg
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
#ifdef ASSERT
- andi_(R0, mark_word, markOopDesc::monitor_value);
+ andi_(R0, mark_word, markWord::monitor_value);
asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
#endif
addi(owner_addr_Reg, mark_word, owner_offset);
@@ -2833,7 +2833,7 @@
Label object_has_monitor;
Label cas_failed;
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
@@ -2851,11 +2851,11 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- andi_(temp, displaced_header, markOopDesc::monitor_value);
+ andi_(temp, displaced_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ ori(displaced_header, displaced_header, markWord::unlocked_value);
// Load Compare Value application register.
@@ -2863,7 +2863,7 @@
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/displaced_header,
@@ -2883,10 +2883,10 @@
bind(cas_failed);
// We did not see an unlocked object so try the fast recursive case.
- // Check if the owner is self by comparing the value in the markOop of object
+ // Check if the owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
- load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+ load_const_optimized(temp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
and_(R0/*==0?*/, current_header, temp);
// If the condition is true we branch to cont and hence we can store 0 as the
@@ -2910,7 +2910,7 @@
#endif // INCLUDE_RTM_OPT
// Try to CAS m->owner from NULL to current thread.
- addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
+ addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
@@ -2957,12 +2957,12 @@
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
- andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
- bne(flag, L_regular_unlock); // else RegularLock
- tend_(); // otherwise end...
- b(cont); // ... and we're done
+ ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
+ andi(R0, current_header, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
+ bne(flag, L_regular_unlock); // else RegularLock
+ tend_(); // otherwise end...
+ b(cont); // ... and we're done
bind(L_regular_unlock);
}
#endif
@@ -2978,11 +2978,11 @@
// The object has an existing monitor iff (mark & monitor_value) != 0.
RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
- andi_(R0, current_header, markOopDesc::monitor_value);
+ andi_(R0, current_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object.
+ // the stack address of the basicLock in the markWord of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
@@ -3000,7 +3000,7 @@
b(cont);
bind(object_has_monitor);
- addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
+ addi(current_header, current_header, -markWord::monitor_value); // monitor
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
// It's inflated.
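NOTE: an inflated header is the ObjectMonitor address tagged with monitor_value (binary 10) in its low bits, so untagging is a subtraction, and several hunks fold the tag straight into a field offset instead. Sketch:

    uintptr_t m = obj->mark().value();
    if ((m & markWord::monitor_value) != 0) {            // andi_ ..., monitor_value
      ObjectMonitor* mon = (ObjectMonitor*)(m - markWord::monitor_value);
      // or, in one addressing step: the owner field sits at
      //   m + (ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value)
    }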
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3820,7 +3820,7 @@
if (UseBiasedLocking) {
__ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
} else {
- __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
+ __ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
}
__ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -96,7 +96,7 @@
}
// and mark it as unlocked.
- z_oill(hdr, markOopDesc::unlocked_value);
+ z_oill(hdr, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Test if object header is still the same (i.e. unlocked), and if so, store the
@@ -115,19 +115,19 @@
// If the object header was not the same, it is now in the hdr register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
- // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
+ // 1) (hdr & markWord::lock_mask_in_place) == 0
// 2) rsp <= hdr
// 3) hdr <= rsp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
- // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
+ // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
z_sgr(hdr, Z_SP);
- load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
@@ -192,7 +192,7 @@
z_lg(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t.
- load_const_optimized(t1, (intx)markOopDesc::prototype());
+ load_const_optimized(t1, (intx)markWord::prototype().value());
}
z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
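NOTE: the three conditions spelled out in the comment above collapse into the single mask-and-test that the load_const_optimized/z_ngr pair performs; the same idiom appears in the aarch64, ppc and sparc files. In C++:

    // Sketch: zero iff hdr is a stack address within our own page-sized
    // stack region and its lock bits are clear, i.e. a recursive stack lock.
    bool recursive_stack_lock(uintptr_t hdr, uintptr_t sp, uintptr_t page_size) {
      uintptr_t mask = ~(page_size - 1) | markWord::lock_mask_in_place;
      return ((hdr - sp) & mask) == 0;
    }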
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -41,14 +41,14 @@
void initialize_body(Register objectFields, Register len_in_bytes, Register Rzero);
// locking
- // hdr : Used to hold locked markOop to be CASed into obj, contents destroyed.
+ // hdr : Used to hold locked markWord to be CASed into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents preserved.
// Returns code offset at which to add null check debug information.
void lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
// unlocking
- // hdr : Used to hold original markOop to be CASed back into obj, contents destroyed.
+ // hdr : Used to hold original markWord to be CASed back into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents destroyed.
void unlock_object(Register hdr, Register obj, Register lock, Label& slow_case);
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -974,7 +974,7 @@
// template code:
//
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
@@ -993,17 +993,17 @@
NearLabel done;
NearLabel slow_case;
- // markOop displaced_header = obj->mark().set_unlocked();
+ // markWord displaced_header = obj->mark().set_unlocked();
- // Load markOop from object into displaced_header.
+ // Load markWord from object into displaced_header.
z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
}
- // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
- z_oill(displaced_header, markOopDesc::unlocked_value);
+ // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
+ z_oill(displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
@@ -1027,7 +1027,7 @@
// We did not see an unlocked object so try the fast recursive case.
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// (current_header) with the stack pointer.
z_sgr(current_header, Z_SP);
@@ -1035,7 +1035,7 @@
// The prior sequence "LGR, NGR, LTGR" can be done better
// (Z_R1 is temp and not used after here).
- load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
// If condition is true we are done and hence we can store 0 in the displaced
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3198,15 +3198,15 @@
// whether the epoch is still valid.
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits.
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
"biased locking makes assumptions about bit layout");
z_lr(temp_reg, mark_reg);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.
load_prototype_header(temp_reg, obj_reg);
- load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+ load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
z_ogr(temp_reg, Z_thread);
z_xgr(temp_reg, mark_reg);
@@ -3232,7 +3232,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
z_brnaz(try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -3244,7 +3244,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
+ z_tmll(temp_reg, markWord::epoch_mask_in_place);
z_brnaz(try_rebias);
//----------------------------------------------------------------------------
@@ -3254,8 +3254,8 @@
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place);
z_lgr(temp_reg, Z_thread);
z_llgfr(mark_reg, mark_reg);
z_ogr(temp_reg, mark_reg);
@@ -3287,7 +3287,7 @@
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
- z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
load_prototype_header(temp_reg, obj_reg);
z_llgfr(mark_reg, mark_reg);
@@ -3348,9 +3348,9 @@
BLOCK_COMMENT("biased_locking_exit {");
z_lg(temp_reg, 0, mark_addr);
- z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
-
- z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+ z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
+
+ z_chi(temp_reg, markWord::biased_lock_pattern);
z_bre(done);
BLOCK_COMMENT("} biased_locking_exit");
}
@@ -3363,7 +3363,7 @@
BLOCK_COMMENT("compiler_fast_lock_object {");
- // Load markOop from oop into mark.
+ // Load markWord from oop into mark.
z_lg(displacedHeader, 0, oop);
if (try_bias) {
@@ -3372,13 +3372,13 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
z_lr(temp, displacedHeader);
- z_nill(temp, markOopDesc::monitor_value);
+ z_nill(temp, markWord::monitor_value);
z_brne(object_has_monitor);
- // Set mark to markOop | markOopDesc::unlocked_value.
- z_oill(displacedHeader, markOopDesc::unlocked_value);
+ // Set mark to markWord | markWord::unlocked_value.
+ z_oill(displacedHeader, markWord::unlocked_value);
// Load Compare Value application register.
@@ -3386,7 +3386,7 @@
z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
// Memory Fence (in cmpxchgd)
- // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+ // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
// If the compare-and-swap succeeded, then we found an unlocked object and we
// have now locked it.
@@ -3397,7 +3397,7 @@
// We did not see an unlocked object so try the fast recursive case.
z_sgr(currentHeader, Z_SP);
- load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(currentHeader, temp);
// z_brne(done);
@@ -3407,7 +3407,7 @@
z_bru(done);
Register zero = temp;
- Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
+ Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL,
// otherwise m->owner may contain a thread or a stack address.
@@ -3456,12 +3456,12 @@
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
- guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
- z_nill(currentHeader, markOopDesc::monitor_value);
+ guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
+ z_nill(currentHeader, markWord::monitor_value);
z_brne(object_has_monitor);
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
// copy box to currentHeader such that csg does not kill it.
z_lgr(currentHeader, box);
z_csg(currentHeader, displacedHeader, 0, oop);
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3880,7 +3880,7 @@
__ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
} else {
__ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
- (long)markOopDesc::prototype());
+ (long)markWord::prototype().value());
}
__ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -97,12 +97,12 @@
mov(Rbox, Rscratch);
// and mark it unlocked
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
// save unlocked object header into the displaced header location on the stack
st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
- // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
+ // compare object markWord with Rmark and if equal exchange Rscratch with object markWord
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
@@ -144,7 +144,7 @@
}
// Check if it is still a light weight lock, this is true if we see
- // the stack address of the basicLock in the markOop of the object
+ // the stack address of the basicLock in the markWord of the object
cas_ptr(mark_addr.base(), Rbox, Rmark);
cmp(Rbox, Rmark);
@@ -179,7 +179,7 @@
if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else {
- set((intx)markOopDesc::prototype(), t1);
+ set((intx)markWord::prototype().value(), t1);
}
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
if (UseCompressedClassPointers) {
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1200,7 +1200,7 @@
assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
- // load markOop from object into mark_reg
+ // load markWord from object into mark_reg
ld_ptr(mark_addr, mark_reg);
if (UseBiasedLocking) {
@@ -1211,11 +1211,11 @@
// we need a temporary register here as we do not want to clobber lock_reg
// (cas clobbers the destination register)
mov(lock_reg, temp_reg);
- // set mark reg to be (markOop of object | UNLOCK_VALUE)
- or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
+ // set mark reg to be (markWord of object | UNLOCK_VALUE)
+ or3(mark_reg, markWord::unlocked_value, mark_reg);
// initialize the box (Must happen before we update the object mark!)
st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
- // compare and exchange object_addr, markOop | 1, stack address of basicLock
+ // compare and exchange object_addr, markWord | 1, stack address of basicLock
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -1224,7 +1224,7 @@
// We did not see an unlocked object so try the fast recursive case
- // Check if owner is self by comparing the value in the markOop of object
+ // Check if owner is self by comparing the value in the markWord of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
sub(temp_reg, STACK_BIAS, temp_reg);
@@ -1234,7 +1234,7 @@
// (a) %sp -vs- markword proximity check, and,
// (b) verify mark word LSBs == 0 (Stack-locked).
//
- // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
+ // FFFFF003/FFFFFFFFFFFF003 is (markWord::lock_mask_in_place | -os::vm_page_size())
// Note that the page size used for %sp proximity testing is arbitrary and is
// unrelated to the actual MMU page size. We use a 'logical' page size of
// 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -2452,15 +2452,15 @@
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ and3(mark_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp_and_brx_short(temp_reg, markWord::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
- andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
+ andcc(temp_reg, ~((int) markWord::age_mask_in_place), temp_reg);
if (counters != NULL) {
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
// Reload mark_reg as we may need it later
@@ -2483,7 +2483,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
+ btst(markWord::biased_lock_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -2495,7 +2495,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
+ delayed()->btst(markWord::epoch_mask_in_place, temp_reg);
brx(Assembler::notZero, false, Assembler::pn, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -2505,7 +2505,7 @@
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
delayed()->and3(mark_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
+ markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -2586,8 +2586,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
ld_ptr(mark_addr, temp_reg);
- and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp(temp_reg, markOopDesc::biased_lock_pattern);
+ and3(temp_reg, markWord::biased_lock_mask_in_place, temp_reg);
+ cmp(temp_reg, markWord::biased_lock_pattern);
brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
delayed();
if (!allow_delay_slot_filling) {
@@ -2603,12 +2603,12 @@
// box->dhw disposition - post-conditions at DONE_LABEL.
// - Successful inflated lock: box->dhw != 0.
// Any non-zero value suffices.
-// Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
+// Consider G2_thread, rsp, boxReg, or markWord::unused_mark()
// - Successful Stack-lock: box->dhw == mark.
// box->dhw must contain the displaced mark word value
// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
// The slow-path fast_enter() and slow_enter() operators
-// are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
+// are responsible for setting box->dhw = NonZero (typically markWord::unused_mark()).
// - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2658,7 +2658,7 @@
// This presumes TSO, of course.
mov(0, Rscratch);
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ or3(Rmark, markWord::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
@@ -2712,7 +2712,7 @@
// set icc.zf : 1=success 0=failure
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
- // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+ // markWord::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
// Intentional fall-through into done
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1835,19 +1835,19 @@
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
__ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
- __ sethi(markOopDesc::hash_mask, mask);
- __ btst(markOopDesc::unlocked_value, header);
+ __ sethi(markWord::hash_mask, mask);
+ __ btst(markWord::unlocked_value, header);
__ br(Assembler::zero, false, Assembler::pn, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
__ delayed()->nop();
- __ btst(markOopDesc::biased_lock_bit_in_place, header);
+ __ btst(markWord::biased_lock_bit_in_place, header);
__ br(Assembler::notZero, false, Assembler::pn, slowCase);
}
- __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
+ __ delayed()->or3(mask, markWord::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value.
- __ srlx(header, markOopDesc::hash_shift, hash);
+ __ srlx(header, markWord::hash_shift, hash);
__ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop();
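NOTE: the SPARC fast hashCode path reads the hash straight out of an unlocked, unbiased header and falls back to the runtime otherwise. In C++ terms (a sketch; slow_case stands for the runtime call, and hash 0 means "not assigned yet"):

    uintptr_t m = obj->mark().value();
    if ((m & markWord::unlocked_value) == 0)             // btst unlocked_value
      return slow_case();                                // locked or inflated
    if (UseBiasedLocking && (m & markWord::biased_lock_bit_in_place) != 0)
      return slow_case();                                // biased: revoke first
    intptr_t hash = (m >> markWord::hash_shift) & markWord::hash_mask;
    if (hash == 0)
      return slow_case();                                // no hash installed yet
    return hash;                                         // srlx / andcc fast exit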
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3517,7 +3517,7 @@
if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else {
- __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
+ __ set((intptr_t)markWord::prototype().value(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
__ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -61,7 +61,7 @@
// Load object header
movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orptr(hdr, markOopDesc::unlocked_value);
+ orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
movptr(Address(disp_hdr, 0), hdr);
// test if object header is still the same (i.e. unlocked), and if so, store the
@@ -156,7 +156,7 @@
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t
- movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
}
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -350,9 +350,9 @@
Label done;
__ movptr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
__ notptr(tmp);
- __ testb(tmp, markOopDesc::marked_value);
+ __ testb(tmp, markWord::marked_value);
__ jccb(Assembler::notZero, done);
- __ orptr(tmp, markOopDesc::marked_value);
+ __ orptr(tmp, markWord::marked_value);
__ notptr(tmp);
__ mov(dst, tmp);
__ bind(done);
@@ -824,15 +824,15 @@
// then test for both bits clear.
__ notptr(tmp1);
#ifdef _LP64
- __ testb(tmp1, markOopDesc::marked_value);
+ __ testb(tmp1, markWord::marked_value);
#else
// On x86_32, C1 register allocator can give us the register without 8-bit support.
// Do the full-register access and test to avoid compilation failures.
- __ testptr(tmp1, markOopDesc::marked_value);
+ __ testptr(tmp1, markWord::marked_value);
#endif
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
- __ orptr(tmp1, markOopDesc::marked_value);
+ __ orptr(tmp1, markWord::marked_value);
__ notptr(tmp1);
// At this point, tmp1 contains the decoded forwarding pointer.
__ mov(res, tmp1);
@@ -963,10 +963,10 @@
// Test if both lowest bits are set. We trick it by negating the bits
// then test for both bits clear.
__ notptr(tmp2);
- __ testb(tmp2, markOopDesc::marked_value);
+ __ testb(tmp2, markWord::marked_value);
__ jccb(Assembler::notZero, slow_path);
// Clear both lower bits. It's still inverted, so set them, and then invert back.
- __ orptr(tmp2, markOopDesc::marked_value);
+ __ orptr(tmp2, markWord::marked_value);
__ notptr(tmp2);
// At this point, tmp2 contains the decoded forwarding pointer.
__ mov(rax, tmp2);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1115,7 +1115,7 @@
assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
assert(tmp_reg != noreg, "tmp_reg must be supplied");
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
NOT_LP64( Address saved_mark_addr(lock_reg, 0); )
@@ -1135,8 +1135,8 @@
movptr(swap_reg, mark_addr);
}
movptr(tmp_reg, swap_reg);
- andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
+ andptr(tmp_reg, markWord::biased_lock_mask_in_place);
+ cmpptr(tmp_reg, markWord::biased_lock_pattern);
jcc(Assembler::notEqual, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
@@ -1162,7 +1162,7 @@
xorptr(swap_reg, tmp_reg);
Register header_reg = swap_reg;
#endif
- andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
+ andptr(header_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->biased_lock_entry_count_addr()));
@@ -1181,7 +1181,7 @@
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
- testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
+ testptr(header_reg, markWord::biased_lock_mask_in_place);
jccb(Assembler::notZero, try_revoke_bias);
// Biasing is still enabled for this data type. See whether the
@@ -1193,7 +1193,7 @@
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
- testptr(header_reg, markOopDesc::epoch_mask_in_place);
+ testptr(header_reg, markWord::epoch_mask_in_place);
jccb(Assembler::notZero, try_rebias);
// The epoch of the current bias is still valid but we know nothing
@@ -1204,7 +1204,7 @@
// don't accidentally blow away another thread's valid bias.
NOT_LP64( movptr(swap_reg, saved_mark_addr); )
andptr(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
#ifdef _LP64
movptr(tmp_reg, swap_reg);
orptr(tmp_reg, r15_thread);
@@ -1298,8 +1298,8 @@
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
+ andptr(temp_reg, markWord::biased_lock_mask_in_place);
+ cmpptr(temp_reg, markWord::biased_lock_pattern);
jcc(Assembler::equal, done);
}
@@ -1486,7 +1486,7 @@
bind(L_rtm_retry);
}
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
- testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -1501,8 +1501,8 @@
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
- andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
+ andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
@@ -1528,7 +1528,7 @@
// Use RTM for inflating locks
// inputs: objReg (object to lock)
// boxReg (on-stack box address (displaced header location) - KILLED)
-// tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
+// tmpReg (ObjectMonitor address + markWord::monitor_value)
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register retry_on_busy_count_Reg,
Register retry_on_abort_count_Reg,
@@ -1542,7 +1542,7 @@
int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
// Without cast to int32_t a movptr will destroy r10 which is typically obj
- movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+ movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
movptr(boxReg, tmpReg); // Save ObjectMonitor address
if (RTMRetryCount > 0) {
@@ -1748,11 +1748,11 @@
#endif // INCLUDE_RTM_OPT
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH]
- testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
jccb(Assembler::notZero, IsInflated);
// Attempt stack-locking ...
- orptr (tmpReg, markOopDesc::unlocked_value);
+ orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
lock();
cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
@@ -1776,7 +1776,7 @@
jmp(DONE_LABEL);
bind(IsInflated);
- // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
+ // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value
#if INCLUDE_RTM_OPT
// Use the same RTM locking code in 32- and 64-bit VM.
@@ -1791,7 +1791,7 @@
// boxReg refers to the on-stack BasicLock in the current frame.
// We'd like to write:
- // set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
+ // set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
// This is convenient but results a ST-before-CAS penalty. The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain.
@@ -1836,9 +1836,9 @@
lock();
cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
- // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
+ // Unconditionally set box->_displaced_header = markWord::unused_mark().
// Without cast to int32_t movptr will destroy r10 which is typically obj.
- movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+ movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
// Intentional fall-through into DONE_LABEL ...
// Propagate ICC.ZF from CAS above into DONE_LABEL.
#endif // _LP64
@@ -1906,20 +1906,20 @@
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
- andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
- cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
- jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
- xend(); // otherwise end...
- jmp(DONE_LABEL); // ... and we're done
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
+ andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
+ jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
+ xend(); // otherwise end...
+ jmp(DONE_LABEL); // ... and we're done
bind(L_regular_unlock);
}
#endif
- cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
- jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
- movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
- testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
+ cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
+ jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
+ testptr(tmpReg, markWord::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
// It's inflated.
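
As a cross-check, the stack-locking fast path implemented by the assembly
above can be written against the new value API roughly as follows. This is
a sketch with a hypothetical try_stack_lock helper; it ignores biased
locking, RTM, and the recursive-lock case.

#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/basicLock.hpp"

// Sketch: expect an unlocked mark, stash it as the displaced header in the
// on-stack box, then CAS the box address into the object's mark word.
static bool try_stack_lock(oop obj, BasicLock* box) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    return false;                            // inflated: take the monitor path
  }
  markWord unlocked = mark.set_unlocked();   // expected unlocked bit pattern
  box->set_displaced_header(unlocked);       // anticipate a successful CAS
  return obj->cas_set_mark(markWord::from_pointer(box), unlocked) == unlocked;
}
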
--- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -59,12 +59,12 @@
__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// check if locked
- __ testptr(result, markOopDesc::unlocked_value);
+ __ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
- __ testptr(result, markOopDesc::biased_lock_bit_in_place);
+ __ testptr(result, markWord::biased_lock_bit_in_place);
__ jcc(Assembler::notZero, slowCase);
}
@@ -73,16 +73,16 @@
// Read the header and build a mask to get its hash field.
// Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
- // because it could be larger than 32 bits in a 64-bit vm. See markOop.hpp.
+ // because it could be larger than 32 bits in a 64-bit vm. See markWord.hpp.
- __ shrptr(result, markOopDesc::hash_shift);
- __ andptr(result, markOopDesc::hash_mask);
+ __ shrptr(result, markWord::hash_shift);
+ __ andptr(result, markWord::hash_mask);
#else
- __ andptr(result, markOopDesc::hash_mask_in_place);
+ __ andptr(result, markWord::hash_mask_in_place);
#endif //_LP64
// test if hashCode exists
__ jcc(Assembler::zero, slowCase);
#ifndef _LP64
- __ shrptr(result, markOopDesc::hash_shift);
+ __ shrptr(result, markWord::hash_shift);
#endif
__ ret(0);
__ bind(slowCase);
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -4108,7 +4108,7 @@
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
- (intptr_t)markOopDesc::prototype()); // header
+ (intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
}
#ifdef _LP64
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -273,12 +273,12 @@
if (method->is_synchronized()) {
monitor = (BasicObjectLock*) istate->stack_base();
oop lockee = monitor->obj();
- markOop disp = lockee->mark()->set_unlocked();
+ markWord disp = lockee->mark().set_unlocked();
monitor->lock()->set_displaced_header(disp);
- if (lockee->cas_set_mark((markOop)monitor, disp) != disp) {
- if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
- monitor->lock()->set_displaced_header(NULL);
+ if (lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) {
+ if (thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) {
+ monitor->lock()->set_displaced_header(markWord::from_pointer(NULL));
}
else {
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
@@ -413,12 +413,12 @@
// Unlock if necessary
if (monitor) {
BasicLock *lock = monitor->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
oop rcvr = monitor->obj();
monitor->set_obj(NULL);
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
monitor->set_obj(rcvr); {
HandleMark hm(thread);
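
One consequence visible in this file: a value type has no NULL, so the old
pointer tests on displaced headers become explicit to_pointer() conversions.
A small sketch, with is_recursive_stack_lock as a hypothetical name:

#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"

// A zero displaced header denotes a recursive stack lock; with the value
// carrier, the old "header != NULL" check is spelled as a conversion.
static bool is_recursive_stack_lock(BasicLock* lock) {
  return lock->displaced_header().to_pointer() == NULL;
}
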
--- a/src/hotspot/share/classfile/altHashing.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/classfile/altHashing.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -35,8 +35,8 @@
// objects. We don't want to call the synchronizer hash code to install
// this value because it may safepoint.
static intptr_t object_hash(Klass* k) {
- intptr_t hc = k->java_mirror()->mark()->hash();
- return hc != markOopDesc::no_hash ? hc : os::random();
+ intptr_t hc = k->java_mirror()->mark().hash();
+ return hc != markWord::no_hash ? hc : os::random();
}
// Seed value used for each alternative hash calculated.
--- a/src/hotspot/share/classfile/systemDictionary.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -2154,7 +2154,7 @@
// NOTE that we must only do this when the class is initially
// defined, not each time it is referenced from a new class loader
if (oopDesc::equals(k->class_loader(), class_loader())) {
- k->set_prototype_header(markOopDesc::biased_locking_prototype());
+ k->set_prototype_header(markWord::biased_locking_prototype());
}
}
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -736,7 +736,7 @@
size_t PromotionInfo::refillSize() const {
const size_t CMSSpoolBlockSize = 256;
- const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
+ const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
* CMSSpoolBlockSize);
return CompactibleFreeListSpace::adjustObjectSize(sz);
}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1010,7 +1010,7 @@
// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
- oop old, markOop m,
+ oop old, markWord m,
size_t word_sz) {
#ifndef PRODUCT
if (CMSHeap::heap()->promotion_should_fail()) {
@@ -7776,10 +7776,10 @@
assert(stack->capacity() > num, "Shouldn't bite more than can chew");
size_t i = num;
oop cur = _overflow_list;
- const markOop proto = markOopDesc::prototype();
+ const markWord proto = markWord::prototype();
NOT_PRODUCT(ssize_t n = 0;)
for (oop next; i > 0 && cur != NULL; cur = next, i--) {
- next = oop(cur->mark_raw());
+ next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = stack->push(cur);
@@ -7863,8 +7863,8 @@
size_t i = num;
oop cur = prefix;
// Walk down the first "num" objects, unless we reach the end.
- for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
- if (cur->mark_raw() == NULL) {
+ for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
+ if (cur->mark_raw().to_pointer() == NULL) {
// We have "num" or fewer elements in the list, so there
// is nothing to return to the global list.
// Write back the NULL in lieu of the BUSY we wrote
@@ -7874,9 +7874,9 @@
}
} else {
// Chop off the suffix and return it to the global list.
- assert(cur->mark_raw() != BUSY, "Error");
- oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
- cur->set_mark_raw(NULL); // break off suffix
+ assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
+ oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
+ cur->set_mark_raw(markWord::from_pointer(NULL)); // break off suffix
// It's possible that the list is still in the empty(busy) state
// we left it in a short while ago; in that case we may be
// able to place back the suffix without incurring the cost
@@ -7896,18 +7896,18 @@
// Too bad, someone else sneaked in (at least) an element; we'll need
// to do a splice. Find tail of suffix so we can prepend suffix to global
// list.
- for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
+ for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
oop suffix_tail = cur;
- assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
+ assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
"Tautology");
observed_overflow_list = _overflow_list;
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
// Do the splice ...
- suffix_tail->set_mark_raw(markOop(cur_overflow_list));
+ suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else { // cur_overflow_list == BUSY
- suffix_tail->set_mark_raw(NULL);
+ suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
}
// ... and try to place spliced list back on overflow_list ...
observed_overflow_list =
@@ -7919,11 +7919,11 @@
// Push the prefix elements on work_q
assert(prefix != NULL, "control point invariant");
- const markOop proto = markOopDesc::prototype();
+ const markWord proto = markWord::prototype();
oop next;
NOT_PRODUCT(ssize_t n = 0;)
for (cur = prefix; cur != NULL; cur = next) {
- next = oop(cur->mark_raw());
+ next = oop(cur->mark_raw().to_pointer());
cur->set_mark_raw(proto); // until proven otherwise
assert(oopDesc::is_oop(cur), "Should be an oop");
bool res = work_q->push(cur);
@@ -7942,7 +7942,7 @@
NOT_PRODUCT(_num_par_pushes++;)
assert(oopDesc::is_oop(p), "Not an oop");
preserve_mark_if_necessary(p);
- p->set_mark_raw((markOop)_overflow_list);
+ p->set_mark_raw(markWord::from_pointer(_overflow_list));
_overflow_list = p;
}
@@ -7956,9 +7956,9 @@
do {
cur_overflow_list = observed_overflow_list;
if (cur_overflow_list != BUSY) {
- p->set_mark_raw(markOop(cur_overflow_list));
+ p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
} else {
- p->set_mark_raw(NULL);
+ p->set_mark_raw(markWord::from_pointer(NULL));
}
observed_overflow_list =
Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
@@ -7980,7 +7980,7 @@
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
-void CMSCollector::preserve_mark_work(oop p, markOop m) {
+void CMSCollector::preserve_mark_work(oop p, markWord m) {
_preserved_oop_stack.push(p);
_preserved_mark_stack.push(m);
assert(m == p->mark_raw(), "Mark word changed");
@@ -7990,15 +7990,15 @@
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
- markOop m = p->mark_raw();
- if (m->must_be_preserved(p)) {
+ markWord m = p->mark_raw();
+ if (m.must_be_preserved(p)) {
preserve_mark_work(p, m);
}
}
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
- markOop m = p->mark_raw();
- if (m->must_be_preserved(p)) {
+ markWord m = p->mark_raw();
+ if (m.must_be_preserved(p)) {
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
// Even though we read the mark word without holding
// the lock, we are assured that it will not change
@@ -8038,9 +8038,9 @@
oop p = _preserved_oop_stack.pop();
assert(oopDesc::is_oop(p), "Should be an oop");
assert(_span.contains(p), "oop should be in _span");
- assert(p->mark_raw() == markOopDesc::prototype(),
+ assert(p->mark_raw() == markWord::prototype(),
"Set when taken from overflow list");
- markOop m = _preserved_mark_stack.pop();
+ markWord m = _preserved_mark_stack.pop();
p->set_mark_raw(m);
}
assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
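
The CMS overflow list threads its "next" links through the mark words
themselves, which is why every link above now goes through from_pointer()
and to_pointer(). A simplified, single-threaded sketch of the idiom with
hypothetical helpers; the real code uses Atomic::cmpxchg and a BUSY
sentinel:

#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"

// Sketch: link an object onto the list by storing the old head in its mark.
static void push_overflow(oop p, oop* list_head) {
  p->set_mark_raw(markWord::from_pointer(*list_head));
  *list_head = p;
}

// Sketch: unlink the head, following the mark-word link, and give the
// popped object a clean prototype mark again.
static oop pop_overflow(oop* list_head) {
  oop cur = *list_head;
  *list_head = oop(cur->mark_raw().to_pointer());
  cur->set_mark_raw(markWord::prototype());
  return cur;
}
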
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -541,8 +541,8 @@
// The following array-pair keeps track of mark words
// displaced for accommodating overflow list above.
// This code will likely be revisited under RFE#4922830.
- Stack<oop, mtGC> _preserved_oop_stack;
- Stack<markOop, mtGC> _preserved_mark_stack;
+ Stack<oop, mtGC> _preserved_oop_stack;
+ Stack<markWord, mtGC> _preserved_mark_stack;
// In support of multi-threaded concurrent phases
YieldingFlexibleWorkGang* _conc_workers;
@@ -742,7 +742,7 @@
void preserve_mark_if_necessary(oop p);
void par_preserve_mark_if_necessary(oop p);
- void preserve_mark_work(oop p, markOop m);
+ void preserve_mark_work(oop p, markWord m);
void restore_preserved_marks_if_any();
NOT_PRODUCT(bool no_preserved_marks() const;)
// In support of testing overflow code
@@ -1136,7 +1136,7 @@
// Overrides for parallel promotion.
virtual oop par_promote(int thread_num,
- oop obj, markOop m, size_t word_sz);
+ oop obj, markWord m, size_t word_sz);
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
--- a/src/hotspot/share/gc/cms/freeChunk.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/freeChunk.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -56,14 +56,14 @@
class FreeChunk {
friend class VMStructs;
- // For 64 bit compressed oops, the markOop encodes both the size and the
+ // For 64 bit compressed oops, the markWord encodes both the size and the
// indication that this is a FreeChunk and not an object.
volatile size_t _size;
FreeChunk* _prev;
FreeChunk* _next;
- markOop mark() const volatile { return (markOop)_size; }
- void set_mark(markOop m) { _size = (size_t)m; }
+ markWord mark() const volatile { return markWord((uintptr_t)_size); }
+ void set_mark(markWord m) { _size = (size_t)m.value(); }
public:
NOT_PRODUCT(static const size_t header_size();)
@@ -79,7 +79,7 @@
}
bool is_free() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
+ LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
@@ -100,11 +100,11 @@
debug_only(void* size_addr() const { return (void*)&_size; })
size_t size() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
+ LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
return _size;
}
void set_size(size_t sz) {
- LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
+ LP64_ONLY(if (UseCompressedOops) set_mark(markWord::set_size_and_free(sz)); else )
_size = sz;
}
@@ -126,7 +126,7 @@
#ifdef _LP64
if (UseCompressedOops) {
OrderAccess::storestore();
- set_mark(markOopDesc::prototype());
+ set_mark(markWord::prototype());
}
#endif
assert(!is_free(), "Error");
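
Under 64-bit compressed oops a FreeChunk has no room for a separate size
field, so size and freeness are encoded in the header word. A sketch of the
round trip performed by the accessors above, using the real markWord
helpers named in this hunk:

#include "oops/markWord.hpp"
#include "utilities/debug.hpp"

// Sketch: encode a chunk size plus the "CMS free chunk" tag into a mark.
static markWord make_free_chunk_mark(size_t size) {
  return markWord::set_size_and_free(size);
}

// Sketch: decode it again; only valid for marks carrying the free tag.
static size_t free_chunk_size(markWord m) {
  assert(m.is_cms_free_chunk(), "must be a free chunk");
  return m.get_size();
}
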
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1078,7 +1078,7 @@
oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop old,
size_t sz,
- markOop m) {
+ markWord m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -381,7 +381,7 @@
// that must not contain a forwarding pointer (though one might be
// inserted in "obj"s mark word by a parallel thread).
oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
- oop obj, size_t obj_sz, markOop m);
+ oop obj, size_t obj_sz, markWord m);
// in support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -44,9 +44,9 @@
// we need to ensure that it is copied (see comment in
// ParScanClosure::do_oop_work).
Klass* objK = obj->klass();
- markOop m = obj->mark_raw();
+ markWord m = obj->mark_raw();
oop new_obj;
- if (m->is_marked()) { // Contains forwarding pointer.
+ if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
} else {
size_t obj_sz = obj->size_given_klass(objK);
@@ -108,9 +108,9 @@
// overwritten with an overflow next pointer after the object is
// forwarded.
Klass* objK = obj->klass();
- markOop m = obj->mark_raw();
+ markWord m = obj->mark_raw();
oop new_obj;
- if (m->is_marked()) { // Contains forwarding pointer.
+ if (m.is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -62,12 +62,12 @@
// Return the next displaced header, incrementing the pointer and
// recycling spool area as necessary.
-markOop PromotionInfo::nextDisplacedHeader() {
+markWord PromotionInfo::nextDisplacedHeader() {
assert(_spoolHead != NULL, "promotionInfo inconsistency");
assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
"Empty spool space: no displaced header can be fetched");
assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
- markOop hdr = _spoolHead->displacedHdr[_firstIndex];
+ markWord hdr = _spoolHead->displacedHdr[_firstIndex];
// Spool forward
if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
// forward to next block, recycling this block into spare spool buffer
@@ -93,15 +93,15 @@
void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
// make a copy of header as it may need to be spooled
- markOop mark = oop(trackOop)->mark_raw();
+ markWord mark = oop(trackOop)->mark_raw();
trackOop->clear_next();
- if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
+ if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
trackOop->setDisplacedMark();
} else {
// we'd like to assert something like the following:
- // assert(mark == markOopDesc::prototype(), "consistency check");
+ // assert(mark == markWord::prototype(), "consistency check");
// ... but the above won't work because the age bits have not (yet) been
// cleared. The remainder of the check would be identical to the
// condition checked in must_be_preserved() above, so we don't really
@@ -123,7 +123,7 @@
// Save the given displaced header, incrementing the pointer and
// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markOop hdr) {
+void PromotionInfo::saveDisplacedHeader(markWord hdr) {
assert(_spoolHead != NULL && _spoolTail != NULL,
"promotionInfo inconsistency");
assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
--- a/src/hotspot/share/gc/cms/promotionInfo.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/cms/promotionInfo.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -93,19 +93,19 @@
protected:
SpoolBlock* nextSpoolBlock;
size_t bufferSize; // number of usable words in this block
- markOop* displacedHdr; // the displaced headers start here
+ markWord* displacedHdr; // the displaced headers start here
// Note about bufferSize: it denotes the number of entries available plus 1;
// legal indices range from 1 through BufferSize - 1. See the verification
// code verify() that counts the number of displaced headers spooled.
size_t computeBufferSize() {
- return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
+ return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markWord);
}
public:
void init() {
bufferSize = computeBufferSize();
- displacedHdr = (markOop*)&displacedHdr;
+ displacedHdr = (markWord*)&displacedHdr;
nextSpoolBlock = NULL;
}
@@ -151,8 +151,8 @@
void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
CompactibleFreeListSpace* space() const { return _space; }
- markOop nextDisplacedHeader(); // get next header & forward spool pointer
- void saveDisplacedHeader(markOop hdr);
+ markWord nextDisplacedHeader(); // get next header & forward spool pointer
+ void saveDisplacedHeader(markWord hdr);
// save header and forward spool
inline size_t refillSize() const;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3137,7 +3137,7 @@
phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}
-void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
+void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m) {
if (!_evacuation_failed) {
_evacuation_failed = true;
}
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -813,7 +813,7 @@
// Preserve the mark of "obj", if necessary, in preparation for its mark
// word being overwritten with a self-forwarding-pointer.
- void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
+ void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);
#ifndef PRODUCT
// Support for forcing evacuation failures. Analogous to
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -282,7 +282,7 @@
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
- // word is set to markOop::marked_value - effectively removing
+ // word is set to markWord::marked_value - effectively removing
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -116,11 +116,11 @@
} else {
// Make sure object has the correct mark-word set or that it will be
// fixed when restoring the preserved marks.
- assert(object->mark_raw() == markOopDesc::prototype_for_object(object) || // Correct mark
- object->mark_raw()->must_be_preserved(object) || // Will be restored by PreservedMarksSet
+ assert(object->mark_raw() == markWord::prototype_for_object(object) || // Correct mark
+ object->mark_raw().must_be_preserved(object) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && object->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
- p2i(object), p2i(object->mark_raw()), p2i(markOopDesc::prototype_for_object(object)));
+ p2i(object), object->mark_raw().value(), markWord::prototype_for_object(object).value());
}
assert(object->forwardee() == NULL, "should be forwarded to NULL");
}
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -50,8 +50,8 @@
}
// Marked by us, preserve if needed.
- markOop mark = obj->mark_raw();
- if (mark->must_be_preserved(obj) &&
+ markWord mark = obj->mark_raw();
+ if (mark.must_be_preserved(obj) &&
!G1ArchiveAllocator::is_open_archive_object(obj)) {
preserved_stack()->push(obj, mark);
}
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -77,11 +77,11 @@
oop forwardee = obj->forwardee();
if (forwardee == NULL) {
// Not forwarded, return current reference.
- assert(obj->mark_raw() == markOopDesc::prototype_for_object(obj) || // Correct mark
- obj->mark_raw()->must_be_preserved(obj) || // Will be restored by PreservedMarksSet
+ assert(obj->mark_raw() == markWord::prototype_for_object(obj) || // Correct mark
+ obj->mark_raw().must_be_preserved(obj) || // Will be restored by PreservedMarksSet
(UseBiasedLocking && obj->has_bias_pattern_raw()), // Will be restored by BiasedLocking
"Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
- p2i(obj), p2i(obj->mark_raw()), p2i(markOopDesc::prototype_for_object(obj)));
+ p2i(obj), obj->mark_raw().value(), markWord::prototype_for_object(obj).value());
return;
}
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -230,9 +230,9 @@
const G1HeapRegionAttr state = _g1h->region_attr(obj);
if (state.is_in_cset()) {
oop forwardee;
- markOop m = obj->mark_raw();
- if (m->is_marked()) {
- forwardee = (oop) m->decode_pointer();
+ markWord m = obj->mark_raw();
+ if (m.is_marked()) {
+ forwardee = (oop) m.decode_pointer();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -196,10 +196,10 @@
}
}
-G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
+G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
if (region_attr.is_young()) {
- age = !m->has_displaced_mark_helper() ? m->age()
- : m->displaced_mark_helper()->age();
+ age = !m.has_displaced_mark_helper() ? m.age()
+ : m.displaced_mark_helper().age();
if (age < _tenuring_threshold) {
return region_attr;
}
@@ -223,7 +223,7 @@
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
oop const old,
- markOop const old_mark) {
+ markWord const old_mark) {
const size_t word_sz = old->size();
HeapRegion* const from_region = _g1h->heap_region_containing(old);
// +1 to make the -1 indexes valid...
@@ -281,18 +281,18 @@
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
if (dest_attr.is_young()) {
- if (age < markOopDesc::max_age) {
+ if (age < markWord::max_age) {
age++;
}
- if (old_mark->has_displaced_mark_helper()) {
+ if (old_mark.has_displaced_mark_helper()) {
// In this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
obj->set_mark_raw(old_mark);
- markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
- old_mark->set_displaced_mark_helper(new_mark);
+ markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
+ old_mark.set_displaced_mark_helper(new_mark);
} else {
- obj->set_mark_raw(old_mark->set_age(age));
+ obj->set_mark_raw(old_mark.set_age(age));
}
_age_table.add(age, word_sz);
} else {
@@ -376,7 +376,7 @@
}
}
-oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
+oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -203,7 +203,7 @@
size_t word_sz,
bool previous_plab_refill_failed);
- inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);
+ inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
void report_promotion_event(G1HeapRegionAttr const dest_attr,
oop const old, size_t word_sz, uint age,
@@ -214,7 +214,7 @@
inline void trim_queue_to_threshold(uint threshold);
public:
- oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);
+ oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);
void trim_queue();
void trim_queue_partially();
@@ -225,7 +225,7 @@
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
// An attempt to evacuate "obj" has failed; take necessary steps.
- oop handle_evacuation_failure_par(oop obj, markOop m);
+ oop handle_evacuation_failure_par(oop obj, markWord m);
template <typename T>
inline void remember_root_into_optional_region(T* p);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -53,9 +53,9 @@
return;
}
- markOop m = obj->mark_raw();
- if (m->is_marked()) {
- obj = (oop) m->decode_pointer();
+ markWord m = obj->mark_raw();
+ if (m.is_marked()) {
+ obj = (oop) m.decode_pointer();
} else {
obj = copy_to_survivor_space(region_attr, obj, m);
}
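
All of these copying paths share the same in-place forwarding test. A
minimal sketch of it, factored into a hypothetical resolve_forwardee
helper:

#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"

// Sketch: during evacuation a marked mark word carries the forwarding
// pointer, so decode it; otherwise the object has not been copied yet.
static oop resolve_forwardee(oop obj) {
  markWord m = obj->mark_raw();
  return m.is_marked() ? (oop) m.decode_pointer() : obj;
}
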
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -75,7 +75,7 @@
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
-// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
+// pointers into markWords for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
void PSMarkSweepDecorator::precompact() {
@@ -113,8 +113,8 @@
const intx interval = PrefetchScanIntervalInBytes;
while (q < t) {
- assert(oop(q)->mark_raw()->is_marked() || oop(q)->mark_raw()->is_unlocked() ||
- oop(q)->mark_raw()->has_bias_pattern(),
+ assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
+ oop(q)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (oop(q)->is_gc_marked()) {
/* prefetch beyond q */
@@ -259,7 +259,7 @@
if (allowed_deadspace_words >= deadlength) {
allowed_deadspace_words -= deadlength;
CollectedHeap::fill_with_object(q, deadlength);
- oop(q)->set_mark_raw(oop(q)->mark_raw()->set_marked());
+ oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
assert((int) deadlength == oop(q)->size(), "bad filler object size");
// Recall that we required "q == compaction_top".
return true;
@@ -350,7 +350,7 @@
q = t;
} else {
// $$$ Funky
- q = (HeapWord*) oop(_first_dead)->mark_raw()->decode_pointer();
+ q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
}
}
@@ -361,7 +361,7 @@
if (!oop(q)->is_gc_marked()) {
// mark is pointer to next marked oop
debug_only(prev_q = q);
- q = (HeapWord*) oop(q)->mark_raw()->decode_pointer();
+ q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
assert(q > prev_q, "we should be moving forward through memory");
} else {
// prefetch beyond q
--- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -83,7 +83,7 @@
// so they can always fill with an array.
HeapWord* tlab_end = end() + filler_header_size;
typeArrayOop filler_oop = (typeArrayOop) top();
- filler_oop->set_mark_raw(markOopDesc::prototype());
+ filler_oop->set_mark_raw(markWord::prototype());
filler_oop->set_klass(Universe::intArrayKlassObj());
const size_t array_length =
pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -345,7 +345,7 @@
}
}
-oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
+oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) {
assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
// Attempt to CAS in the header.
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -178,7 +178,7 @@
// Promotion methods
template<bool promote_immediately> oop copy_to_survivor_space(oop o);
- oop oop_promotion_failed(oop obj, markOop obj_mark);
+ oop oop_promotion_failed(oop obj, markWord obj_mark);
void reset();
void register_preserved_marks(PreservedMarks* preserved_marks);
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -157,16 +157,16 @@
// NOTE! We must be very careful with any methods that access the mark
// in o. There may be multiple threads racing on it, and it may be forwarded
// at any time. Do not use oop methods for accessing the mark!
- markOop test_mark = o->mark_raw();
+ markWord test_mark = o->mark_raw();
// The same test as "o->is_forwarded()"
- if (!test_mark->is_marked()) {
+ if (!test_mark.is_marked()) {
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();
// Find the objects age, MT safe.
- uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
- test_mark->displaced_mark_helper()->age() : test_mark->age();
+ uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+ test_mark.displaced_mark_helper().age() : test_mark.age();
if (!promote_immediately) {
// Try allocating obj in to-space (unless too old)
@@ -260,7 +260,7 @@
assert(new_obj == o->forwardee(), "Sanity");
// Increment age if obj still in new generation. Now that
- // we're dealing with a markOop that cannot change, it is
+ // we're dealing with a markWord that cannot change, it is
// okay to use the non mt safe oop methods.
if (!new_obj_is_tenured) {
new_obj->incr_age();
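
Age handling follows the same value-based pattern in every copy path. A
sketch with a hypothetical bump_age helper, assuming the mark m was read
once from the old object and is now being installed into the copy:

#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"

// Sketch: read the age (possibly from a displaced header), saturate it at
// markWord::max_age, and install the updated mark into the new copy.
static void bump_age(oop new_obj, markWord m) {
  uint age = m.has_displaced_mark_helper() ? m.displaced_mark_helper().age()
                                           : m.age();
  if (age < markWord::max_age) {
    age++;
  }
  if (m.has_displaced_mark_helper()) {
    new_obj->set_mark_raw(m);    // install the mark first, see G1 comment
    m.set_displaced_mark_helper(m.displaced_mark_helper().set_age(age));
  } else {
    new_obj->set_mark_raw(m.set_age(age));
  }
}
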
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -431,7 +431,7 @@
heap->print_heap_before_gc();
heap->trace_heap_before_gc(&_gc_tracer);
- assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
+ assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
// Fill in TLABs
@@ -823,8 +823,8 @@
// Arguments must have been parsed
if (AlwaysTenure || NeverTenure) {
- assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
- "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
+ assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
+ "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
_tenuring_threshold = MaxTenuringThreshold;
} else {
// We want to smooth out our startup times for the AdaptiveSizePolicy
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -749,7 +749,7 @@
void PSYoungGen::compact() {
eden_mark_sweep()->compact(ZapUnusedHeapArea);
from_mark_sweep()->compact(ZapUnusedHeapArea);
- // Mark sweep stores preserved markOops in to space, don't disturb!
+ // Mark sweep stores preserved markWords in to space, don't disturb!
to_mark_sweep()->compact(false);
}
--- a/src/hotspot/share/gc/serial/markSweep.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -49,7 +49,7 @@
Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack;
Stack<oop, mtGC> MarkSweep::_preserved_oop_stack;
-Stack<markOop, mtGC> MarkSweep::_preserved_mark_stack;
+Stack<markWord, mtGC> MarkSweep::_preserved_mark_stack;
size_t MarkSweep::_preserved_count = 0;
size_t MarkSweep::_preserved_count_max = 0;
PreservedMark* MarkSweep::_preserved_marks = NULL;
@@ -132,7 +132,7 @@
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
- if (!obj->mark_raw()->is_marked()) {
+ if (!obj->mark_raw().is_marked()) {
mark_object(obj);
follow_object(obj);
}
@@ -152,9 +152,9 @@
}
// We preserve the mark which should be replaced at the end and the location
-// that it will go. Note that the object that this markOop belongs to isn't
+// that it will go to. Note that the object that this markWord belongs to isn't
// currently at that address but it will be after phase4
-void MarkSweep::preserve_mark(oop obj, markOop mark) {
+void MarkSweep::preserve_mark(oop obj, markWord mark) {
// We try to store preserved marks in the to space of the new generation since
// this is storage which should be available. Most of the time this should be
// sufficient space for the marks we need to preserve but if it isn't we fall
@@ -204,7 +204,7 @@
// deal with the overflow
while (!_preserved_oop_stack.is_empty()) {
oop obj = _preserved_oop_stack.pop();
- markOop mark = _preserved_mark_stack.pop();
+ markWord mark = _preserved_mark_stack.pop();
obj->set_mark_raw(mark);
}
}
--- a/src/hotspot/share/gc/serial/markSweep.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -100,7 +100,7 @@
static Stack<ObjArrayTask, mtGC> _objarray_stack;
// Space for storing/restoring mark word
- static Stack<markOop, mtGC> _preserved_mark_stack;
+ static Stack<markWord, mtGC> _preserved_mark_stack;
static Stack<oop, mtGC> _preserved_oop_stack;
static size_t _preserved_count;
static size_t _preserved_count_max;
@@ -137,7 +137,7 @@
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }
- static void preserve_mark(oop p, markOop mark);
+ static void preserve_mark(oop p, markWord mark);
// Save the mark word so it can be restored later
static void adjust_marks(); // Adjust the pointers in the preserved marks table
static void restore_marks(); // Restore the marks that we saved in preserve_mark
@@ -199,10 +199,10 @@
class PreservedMark {
private:
oop _obj;
- markOop _mark;
+ markWord _mark;
public:
- void init(oop obj, markOop mark) {
+ void init(oop obj, markWord mark) {
_obj = obj;
_mark = mark;
}
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -37,10 +37,10 @@
inline void MarkSweep::mark_object(oop obj) {
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of markSweep.
- markOop mark = obj->mark_raw();
- obj->set_mark_raw(markOopDesc::prototype()->set_marked());
+ markWord mark = obj->mark_raw();
+ obj->set_mark_raw(markWord::prototype().set_marked());
- if (mark->must_be_preserved(obj)) {
+ if (mark.must_be_preserved(obj)) {
preserve_mark(obj, mark);
}
}
@@ -49,7 +49,7 @@
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
- if (!obj->mark_raw()->is_marked()) {
+ if (!obj->mark_raw().is_marked()) {
mark_object(obj);
_marking_stack.push(obj);
}
@@ -78,11 +78,11 @@
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");
- oop new_obj = oop(obj->mark_raw()->decode_pointer());
+ oop new_obj = oop(obj->mark_raw().decode_pointer());
- assert(new_obj != NULL || // is forwarding ptr?
- obj->mark_raw() == markOopDesc::prototype() || // not gc marked?
- (UseBiasedLocking && obj->mark_raw()->has_bias_pattern()),
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->mark_raw() == markWord::prototype() || // not gc marked?
+ (UseBiasedLocking && obj->mark_raw().has_bias_pattern()),
// not gc marked?
"should be forwarded");
--- a/src/hotspot/share/gc/shared/ageTable.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/ageTable.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -78,8 +78,8 @@
uint result;
if (AlwaysTenure || NeverTenure) {
- assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
- "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
+ assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
+ "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
result = MaxTenuringThreshold;
} else {
size_t total = 0;
--- a/src/hotspot/share/gc/shared/ageTable.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/ageTable.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -41,7 +41,7 @@
public:
// constants
- enum { table_size = markOopDesc::max_age + 1 };
+ enum { table_size = markWord::max_age + 1 };
// instance variables
size_t sizes[table_size];
--- a/src/hotspot/share/gc/shared/gc_globals.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -811,12 +811,12 @@
\
product(uintx, MaxTenuringThreshold, 15, \
"Maximum value for tenuring threshold") \
- range(0, markOopDesc::max_age + 1) \
+ range(0, markWord::max_age + 1) \
constraint(MaxTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, InitialTenuringThreshold, 7, \
"Initial value for tenuring threshold") \
- range(0, markOopDesc::max_age + 1) \
+ range(0, markWord::max_age + 1) \
constraint(InitialTenuringThresholdConstraintFunc,AfterErgo) \
\
product(uintx, TargetSurvivorRatio, 50, \
--- a/src/hotspot/share/gc/shared/generation.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -178,7 +178,7 @@
}
oop Generation::par_promote(int thread_num,
- oop obj, markOop m, size_t word_sz) {
+ oop obj, markWord m, size_t word_sz) {
// Could do a bad general impl here that gets a lock. But no.
ShouldNotCallThis();
return NULL;
--- a/src/hotspot/share/gc/shared/generation.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/generation.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -300,7 +300,7 @@
// word of "obj" may have been overwritten with a forwarding pointer, and
// also taking care to copy the klass pointer *last*. Returns the new
// object if successful, or else NULL.
- virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
+ virtual oop par_promote(int thread_num, oop obj, markWord m, size_t word_sz);
// Informs the current generation that all par_promote_alloc's in the
// collection have been completed; any supporting data structures can be
--- a/src/hotspot/share/gc/shared/memAllocator.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -387,7 +387,7 @@
oopDesc::set_mark_raw(mem, _klass->prototype_header());
} else {
// May be bootstrapping
- oopDesc::set_mark_raw(mem, markOopDesc::prototype());
+ oopDesc::set_mark_raw(mem, markWord::prototype());
}
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -32,16 +32,16 @@
void PreservedMarks::restore() {
while (!_stack.is_empty()) {
- const OopAndMarkOop elem = _stack.pop();
+ const OopAndMarkWord elem = _stack.pop();
elem.set_mark();
}
assert_empty();
}
void PreservedMarks::adjust_during_full_gc() {
- StackIterator<OopAndMarkOop, mtGC> iter(_stack);
+ StackIterator<OopAndMarkWord, mtGC> iter(_stack);
while (!iter.is_empty()) {
- OopAndMarkOop* elem = iter.next_addr();
+ OopAndMarkWord* elem = iter.next_addr();
oop obj = elem->get_oop();
if (obj->is_forwarded()) {
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -35,28 +35,28 @@
class PreservedMarks {
private:
- class OopAndMarkOop {
+ class OopAndMarkWord {
private:
oop _o;
- markOop _m;
+ markWord _m;
public:
- OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) { }
+ OopAndMarkWord(oop obj, markWord m) : _o(obj), _m(m) { }
oop get_oop() { return _o; }
inline void set_mark() const;
void set_oop(oop obj) { _o = obj; }
};
- typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
+ typedef Stack<OopAndMarkWord, mtGC> OopAndMarkWordStack;
- OopAndMarkOopStack _stack;
+ OopAndMarkWordStack _stack;
- inline bool should_preserve_mark(oop obj, markOop m) const;
+ inline bool should_preserve_mark(oop obj, markWord m) const;
public:
size_t size() const { return _stack.size(); }
- inline void push(oop obj, markOop m);
- inline void push_if_necessary(oop obj, markOop m);
+ inline void push(oop obj, markWord m);
+ inline void push_if_necessary(oop obj, markWord m);
// Iterate over the stack, restore all preserved marks, and
// reclaim the memory taken up by the stack segments.
void restore();
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -30,17 +30,17 @@
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
-inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
- return m->must_be_preserved_for_promotion_failure(obj);
+inline bool PreservedMarks::should_preserve_mark(oop obj, markWord m) const {
+ return m.must_be_preserved_for_promotion_failure(obj);
}
-inline void PreservedMarks::push(oop obj, markOop m) {
+inline void PreservedMarks::push(oop obj, markWord m) {
assert(should_preserve_mark(obj, m), "pre-condition");
- OopAndMarkOop elem(obj, m);
+ OopAndMarkWord elem(obj, m);
_stack.push(elem);
}
-inline void PreservedMarks::push_if_necessary(oop obj, markOop m) {
+inline void PreservedMarks::push_if_necessary(oop obj, markWord m) {
if (should_preserve_mark(obj, m)) {
push(obj, m);
}
@@ -72,14 +72,14 @@
}
inline PreservedMarks::PreservedMarks()
- : _stack(OopAndMarkOopStack::default_segment_size(),
+ : _stack(OopAndMarkWordStack::default_segment_size(),
// This stack should be used very infrequently so there's
// no point in caching stack segments (there will be a
// waste of space most of the time). So we set the max
// cache size to 0.
0 /* max_cache_size */) { }
-void PreservedMarks::OopAndMarkOop::set_mark() const {
+void PreservedMarks::OopAndMarkWord::set_mark() const {
_o->set_mark_raw(_m);
}
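
Typical use of the renamed machinery, as a sketch; pm is a hypothetical
PreservedMarks instance and the helper name is invented:

#include "gc/shared/preservedMarks.inline.hpp"

// Sketch: save a mark only if it carries information (hash, age, lock
// state) that overwriting it would destroy, then restore it afterwards.
static void preserve_and_restore(PreservedMarks& pm, oop obj) {
  markWord m = obj->mark_raw();
  pm.push_if_necessary(obj, m);  // no-op unless the mark must be preserved
  // ... the code that clobbers the mark word runs here ...
  pm.restore();                  // reinstalls every saved OopAndMarkWord
}
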
--- a/src/hotspot/share/gc/shared/space.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/space.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -651,14 +651,14 @@
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) allocate(size);
assert(t != NULL, "allocation should succeed");
- t->set_mark_raw(markOopDesc::prototype());
+ t->set_mark_raw(markWord::prototype());
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
- obj->set_mark_raw(markOopDesc::prototype());
+ obj->set_mark_raw(markWord::prototype());
obj->set_klass_gap(0);
obj->set_klass(SystemDictionary::Object_klass());
}
--- a/src/hotspot/share/gc/shared/space.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shared/space.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -117,7 +117,7 @@
_allowed_deadspace_words -= dead_length;
CollectedHeap::fill_with_object(dead_start, dead_length);
oop obj = oop(dead_start);
- obj->set_mark_raw(obj->mark_raw()->set_marked());
+ obj->set_mark_raw(obj->mark_raw().set_marked());
assert(dead_length == (size_t)obj->size(), "bad filler object size");
log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
@@ -164,8 +164,8 @@
while (cur_obj < scan_limit) {
assert(!space->scanned_block_is_obj(cur_obj) ||
- oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
- oop(cur_obj)->mark_raw()->has_bias_pattern(),
+ oop(cur_obj)->mark_raw().is_marked() || oop(cur_obj)->mark_raw().is_unlocked() ||
+ oop(cur_obj)->mark_raw().has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
// prefetch beyond cur_obj
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1458,9 +1458,9 @@
phase->register_new_node(markword, ctrl);
// Test if object is forwarded. This is the case if lowest two bits are set.
- Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
+ Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markWord::lock_mask_in_place));
phase->register_new_node(masked, ctrl);
- Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
+ Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markWord::marked_value));
phase->register_new_node(cmp, ctrl);
// Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr
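
The C2 IR built above is the graph form of a two-bit header test. Roughly, in plain C++ (mask and value constants shown here are illustrative copies of the layout documented in markOop.hpp):

#include <cstdint>

// Low two bits of the header; 0b11 ("marked") doubles as "forwarded" during GC.
const uintptr_t lock_mask_in_place = 0x3;
const uintptr_t marked_value       = 0x3;

// Equivalent of the AndX/CmpX node pair built above.
bool is_forwarded(uintptr_t mark) {
  return (mark & lock_mask_in_place) == marked_value;
}
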
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -61,7 +61,7 @@
r->print_on(&ss);
stringStream mw_ss;
- obj->mark()->print_on(&mw_ss);
+ obj->mark().print_on(&mw_ss);
ShenandoahMarkingContext* const ctx = heap->marking_context();
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -35,9 +35,9 @@
}
inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
- markOop mark = obj->mark_raw();
- if (mark->is_marked()) {
- return (HeapWord*) mark->clear_lock_bits();
+ markWord mark = obj->mark_raw();
+ if (mark.is_marked()) {
+ return (HeapWord*) mark.clear_lock_bits().to_pointer();
} else {
return (HeapWord*) obj;
}
@@ -49,21 +49,21 @@
}
inline bool ShenandoahForwarding::is_forwarded(oop obj) {
- return obj->mark_raw()->is_marked();
+ return obj->mark_raw().is_marked();
}
inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
- markOop old_mark = obj->mark_raw();
- if (old_mark->is_marked()) {
- return (oop) old_mark->clear_lock_bits();
+ markWord old_mark = obj->mark_raw();
+ if (old_mark.is_marked()) {
+ return oop(old_mark.clear_lock_bits().to_pointer());
}
- markOop new_mark = markOopDesc::encode_pointer_as_mark(update);
- markOop prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
+ markWord new_mark = markWord::encode_pointer_as_mark(update);
+ markWord prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
if (prev_mark == old_mark) {
return update;
} else {
- return (oop) prev_mark->clear_lock_bits();
+ return oop(prev_mark.clear_lock_bits().to_pointer());
}
}
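
try_update_forwardee above is a classic CAS race: encode the new location into the header, try to install it, and on failure defer to whichever forwardee another thread installed first. A hedged standalone sketch of the same pattern over std::atomic (tag values illustrative; assumes a stop-the-world or evacuation phase where a failed CAS can only mean another thread forwarded the object):

#include <atomic>
#include <cstdint>

const uintptr_t lock_mask = 0x3; // low two bits carry the lock/mark state
const uintptr_t marked    = 0x3; // 0b11: the header is an encoded forwardee

// Race to install 'fwd' as the forwardee. The winner returns 'fwd'; a loser
// decodes and returns whatever pointer the winner installed. Assumes 'fwd'
// is word-aligned so its low two bits are free for the tag.
void* try_update_forwardee(std::atomic<uintptr_t>& mark, void* fwd) {
  uintptr_t old_mark = mark.load();
  if ((old_mark & lock_mask) == marked) {
    return (void*)(old_mark & ~lock_mask);       // already forwarded
  }
  uintptr_t new_mark = (uintptr_t)fwd | marked;  // encode_pointer_as_mark
  if (mark.compare_exchange_strong(old_mark, new_mark)) {
    return fwd;                                  // we won the race
  }
  // CAS failed: 'old_mark' now holds the mark another thread installed.
  return (void*)(old_mark & ~lock_mask);
}
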
--- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -47,17 +47,17 @@
"Only from a GC worker thread");
if (java_string->age() <= StringDeduplicationAgeThreshold) {
- const markOop mark = java_string->mark();
+ const markWord mark = java_string->mark();
// Has or had a displaced header; too risky to deal with, skip
- if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
+ if (mark == markWord::INFLATING() || mark.has_displaced_mark_helper()) {
return;
}
// Increase string age and enqueue it when it reaches the age threshold
- markOop new_mark = mark->incr_age();
+ markWord new_mark = mark.incr_age();
if (mark == java_string->cas_set_mark(new_mark, mark)) {
- if (mark->age() == StringDeduplicationAgeThreshold) {
+ if (mark.age() == StringDeduplicationAgeThreshold) {
StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
}
}
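
The dedup path above bumps the 4-bit age field with a single CAS attempt and enqueues the string exactly once, when the pre-increment age hits the threshold. A standalone sketch of that try-once pattern (field position and width are illustrative, taken from the header-layout comment in markOop.hpp):

#include <atomic>
#include <cstdint>

const unsigned  age_shift = 3;   // age sits above the lock and bias bits
const uintptr_t age_mask  = 0xF; // 4-bit age field

unsigned age_of(uintptr_t mark) { return (mark >> age_shift) & age_mask; }

uintptr_t incr_age(uintptr_t mark) {
  unsigned a = age_of(mark);
  if (a == age_mask) return mark;  // saturate at max_age
  return (mark & ~(age_mask << age_shift)) | ((uintptr_t)(a + 1) << age_shift);
}

// Single CAS attempt, mirroring the try-once style above: on success, report
// whether the pre-increment age just hit the dedup threshold.
bool try_bump_age(std::atomic<uintptr_t>& mark, unsigned threshold, bool& hit) {
  uintptr_t old_mark = mark.load();
  uintptr_t new_mark = incr_age(old_mark);
  if (mark.compare_exchange_strong(old_mark, new_mark)) {
    hit = (age_of(old_mark) == threshold);
    return true;
  }
  return false;
}
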
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -666,17 +666,17 @@
BasicObjectLock* mon = &istate->monitor_base()[-1];
mon->set_obj(rcvr);
bool success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
- markOop mark = rcvr->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+ markWord mark = rcvr->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// Implies UseBiasedLocking.
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)rcvr->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// Already biased towards this thread, nothing to do.
@@ -684,11 +684,11 @@
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
- } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ } else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// Try to revoke bias.
- markOop header = rcvr->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = rcvr->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -696,9 +696,9 @@
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
// Try to rebias.
- markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) rcvr->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (rcvr->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -710,15 +710,15 @@
success = true;
} else {
// Try to bias towards thread in case object is anonymously biased.
- markOop header = (markOop) ((uintptr_t) mark &
- ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() &
+ ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// Debugging hint.
- DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(mon->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (rcvr->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -732,13 +732,13 @@
// Traditional lightweight locking.
if (!success) {
- markOop displaced = rcvr->mark()->set_unlocked();
+ markWord displaced = rcvr->mark().set_unlocked();
mon->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || rcvr->cas_set_mark((markOop)mon, displaced) != displaced) {
+ if (call_vm || rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- mon->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ mon->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
@@ -851,18 +851,18 @@
assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
entry->set_obj(lockee);
bool success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
-
- markOop mark = lockee->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+
+ markWord mark = lockee->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@@ -870,11 +870,11 @@
(* BiasedLocking::biased_lock_entry_count_addr())++;
}
success = true;
- } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ } else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
- markOop header = lockee->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = lockee->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -883,9 +883,9 @@
}
} else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
- markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics) {
@@ -897,14 +897,14 @@
success = true;
} else {
// try to bias towards thread in case object is anonymously biased
- markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place | epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// debugging hint
- DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -918,13 +918,13 @@
// traditional lightweight locking
if (!success) {
- markOop displaced = lockee->mark()->set_unlocked();
+ markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
+ if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- entry->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@@ -1791,18 +1791,18 @@
if (entry != NULL) {
entry->set_obj(lockee);
int success = false;
- uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
-
- markOop mark = lockee->mark();
- intptr_t hash = (intptr_t) markOopDesc::no_hash;
+ uintptr_t epoch_mask_in_place = (uintptr_t)markWord::epoch_mask_in_place;
+
+ markWord mark = lockee->mark();
+ intptr_t hash = (intptr_t) markWord::no_hash;
// implies UseBiasedLocking
- if (mark->has_bias_pattern()) {
+ if (mark.has_bias_pattern()) {
uintptr_t thread_ident;
uintptr_t anticipated_bias_locking_value;
thread_ident = (uintptr_t)istate->thread();
anticipated_bias_locking_value =
- (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
- ~((uintptr_t) markOopDesc::age_mask_in_place);
+ (((uintptr_t)lockee->klass()->prototype_header().value() | thread_ident) ^ mark.value()) &
+ ~((uintptr_t) markWord::age_mask_in_place);
if (anticipated_bias_locking_value == 0) {
// already biased towards this thread, nothing to do
@@ -1811,11 +1811,11 @@
}
success = true;
}
- else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+ else if ((anticipated_bias_locking_value & markWord::biased_lock_mask_in_place) != 0) {
// try revoke bias
- markOop header = lockee->klass()->prototype_header();
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header = lockee->klass()->prototype_header();
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -1824,9 +1824,9 @@
}
else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
// try rebias
- markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
- if (hash != markOopDesc::no_hash) {
- new_header = new_header->copy_set_hash(hash);
+ markWord new_header( (intptr_t) lockee->klass()->prototype_header().value() | thread_ident);
+ if (hash != markWord::no_hash) {
+ new_header = new_header.copy_set_hash(hash);
}
if (lockee->cas_set_mark(new_header, mark) == mark) {
if (PrintBiasedLockingStatistics)
@@ -1839,15 +1839,15 @@
}
else {
// try to bias towards thread in case object is anonymously biased
- markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
- (uintptr_t)markOopDesc::age_mask_in_place |
- epoch_mask_in_place));
- if (hash != markOopDesc::no_hash) {
- header = header->copy_set_hash(hash);
+ markWord header(mark.value() & ((uintptr_t)markWord::biased_lock_mask_in_place |
+ (uintptr_t)markWord::age_mask_in_place |
+ epoch_mask_in_place));
+ if (hash != markWord::no_hash) {
+ header = header.copy_set_hash(hash);
}
- markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+ markWord new_header(header.value() | thread_ident);
// debugging hint
- DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+ DEBUG_ONLY(entry->lock()->set_displaced_header(markWord((uintptr_t) 0xdeaddead));)
if (lockee->cas_set_mark(new_header, header) == header) {
if (PrintBiasedLockingStatistics)
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
@@ -1861,13 +1861,13 @@
// traditional lightweight locking
if (!success) {
- markOop displaced = lockee->mark()->set_unlocked();
+ markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
- if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
+ if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
- if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
- entry->lock()->set_displaced_header(NULL);
+ if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
+ entry->lock()->set_displaced_header(markWord::from_pointer(NULL));
} else {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
@@ -1890,13 +1890,13 @@
while (most_recent != limit ) {
if ((most_recent)->obj() == lockee) {
BasicLock* lock = most_recent->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
most_recent->set_obj(NULL);
- if (!lockee->mark()->has_bias_pattern()) {
+ if (!lockee->mark().has_bias_pattern()) {
bool call_vm = UseHeavyMonitors;
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL || call_vm) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL || call_vm) {
+ markWord old_header = markWord::encode(lock);
if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
most_recent->set_obj(lockee);
@@ -2182,7 +2182,7 @@
if (UseBiasedLocking) {
result->set_mark(ik->prototype_header());
} else {
- result->set_mark(markOopDesc::prototype());
+ result->set_mark(markWord::prototype());
}
result->set_klass_gap(0);
result->set_klass(ik);
@@ -3035,13 +3035,13 @@
oop lockee = end->obj();
if (lockee != NULL) {
BasicLock* lock = end->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
end->set_obj(NULL);
- if (!lockee->mark()->has_bias_pattern()) {
+ if (!lockee->mark().has_bias_pattern()) {
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
end->set_obj(lockee);
@@ -3110,14 +3110,14 @@
}
} else {
BasicLock* lock = base->lock();
- markOop header = lock->displaced_header();
+ markWord header = lock->displaced_header();
base->set_obj(NULL);
- if (!rcvr->mark()->has_bias_pattern()) {
+ if (!rcvr->mark().has_bias_pattern()) {
base->set_obj(NULL);
// If it isn't recursive we either must swap old header or call the runtime
- if (header != NULL) {
- markOop old_header = markOopDesc::encode(lock);
+ if (header.to_pointer() != NULL) {
+ markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
base->set_obj(rcvr);
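
The anticipated_bias_locking_value expression repeated throughout the interpreter hunks above is a bit trick worth unpacking: XOR (prototype header | current thread) against the object's mark and mask out the age bits. A zero result means the mark is already biased to this thread in the current epoch, so the lock fast path can be taken with no atomic operation at all. A hedged worked example with illustrative field positions (the real shifts live in markOop.hpp):

#include <cassert>
#include <cstdint>

// Illustrative biased-locking layout: [thread ptr | epoch | age | pattern].
const uintptr_t biased_lock_mask_in_place = 0x7;   // low three bits
const uintptr_t biased_lock_pattern       = 0x5;   // 0b101
const uintptr_t age_mask_in_place         = 0x78;  // bits 3..6
const uintptr_t epoch_mask_in_place       = 0x180; // bits 7..8

// Zero iff the mark is already biased to 'thread' in the current epoch:
// the prototype and thread bits cancel against the mark, and age is ignored.
uintptr_t anticipated(uintptr_t prototype, uintptr_t thread, uintptr_t mark) {
  return ((prototype | thread) ^ mark) & ~age_mask_in_place;
}

int main() {
  uintptr_t thread = 0x7f0000001000;               // aligned JavaThread*
  uintptr_t proto  = biased_lock_pattern;          // biasable klass, epoch 0
  uintptr_t mark   = thread | biased_lock_pattern; // biased to 'thread'
  assert(anticipated(proto, thread, mark | (2 << 3)) == 0); // any age: fast path
  assert(anticipated(proto, thread + 0x1000, mark) != 0);   // other thread: slow
  return 0;
}

The nonzero bits of a failed test tell the interpreter which slow path applies: thread bits set means revoke or rebias, epoch bits set means try rebias, and the bias pattern alone means the object is anonymously biased.
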
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -131,7 +131,7 @@
if (!_mark_bits->is_marked(pointee)) {
_mark_bits->mark_obj(pointee);
// is the pointee a sample object?
- if (NULL == pointee->mark()) {
+ if (NULL == pointee->mark().to_pointer()) {
add_chain(reference, pointee);
}
@@ -148,7 +148,7 @@
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
assert(pointee != NULL, "invariant");
- assert(NULL == pointee->mark(), "invariant");
+ assert(NULL == pointee->mark().to_pointer(), "invariant");
Edge leak_edge(_current_parent, reference);
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -121,7 +121,7 @@
assert(_mark_bits->is_marked(pointee), "invariant");
// is the pointee a sample object?
- if (NULL == pointee->mark()) {
+ if (NULL == pointee->mark().to_pointer()) {
add_chain();
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -233,8 +233,8 @@
StoredEdge* const leak_context_edge = put(edge->reference());
oop sample_object = edge->pointee();
assert(sample_object != NULL, "invariant");
- assert(NULL == sample_object->mark(), "invariant");
- sample_object->set_mark(markOop(leak_context_edge));
+ assert(NULL == sample_object->mark().to_pointer(), "invariant");
+ sample_object->set_mark(markWord::from_pointer(leak_context_edge));
return leak_context_edge;
}
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -35,7 +35,7 @@
#include "runtime/handles.inline.hpp"
bool EdgeUtils::is_leak_edge(const Edge& edge) {
- return (const Edge*)edge.pointee()->mark() == &edge;
+ return (const Edge*)edge.pointee()->mark().to_pointer() == &edge;
}
static int field_offset(const StoredEdge& edge) {
--- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -36,45 +36,45 @@
//
class ObjectSampleMarker : public StackObj {
private:
- class ObjectSampleMarkOop : public ResourceObj {
+ class ObjectSampleMarkWord : public ResourceObj {
friend class ObjectSampleMarker;
private:
oop _obj;
- markOop _mark_oop;
- ObjectSampleMarkOop(const oop obj,
- const markOop mark_oop) : _obj(obj),
- _mark_oop(mark_oop) {}
+ markWord _mark_word;
+ ObjectSampleMarkWord(const oop obj,
+ const markWord mark_word) : _obj(obj),
+ _mark_word(mark_word) {}
public:
- ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
+ ObjectSampleMarkWord() : _obj(NULL), _mark_word(markWord::zero()) {}
};
- GrowableArray<ObjectSampleMarkOop>* _store;
+ GrowableArray<ObjectSampleMarkWord>* _store;
public:
ObjectSampleMarker() :
- _store(new GrowableArray<ObjectSampleMarkOop>(16)) {}
+ _store(new GrowableArray<ObjectSampleMarkWord>(16)) {}
~ObjectSampleMarker() {
assert(_store != NULL, "invariant");
- // restore the saved, original, markOop for sample objects
+ // restore the saved, original, markWord for sample objects
while (_store->is_nonempty()) {
- ObjectSampleMarkOop sample_oop = _store->pop();
- sample_oop._obj->set_mark(sample_oop._mark_oop);
- assert(sample_oop._obj->mark() == sample_oop._mark_oop, "invariant");
+ ObjectSampleMarkWord sample_oop = _store->pop();
+ sample_oop._obj->set_mark(sample_oop._mark_word);
+ assert(sample_oop._obj->mark() == sample_oop._mark_word, "invariant");
}
}
void mark(oop obj) {
assert(obj != NULL, "invariant");
- // save the original markOop
- _store->push(ObjectSampleMarkOop(obj, obj->mark()));
+ // save the original markWord
+ _store->push(ObjectSampleMarkWord(obj, obj->mark()));
// now we will "poison" the mark word of the sample object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify sample objects
// during the reachability search from gc roots.
- assert(NULL == markOopDesc::INFLATING(), "invariant");
- obj->set_mark(markOopDesc::INFLATING());
- assert(NULL == obj->mark(), "invariant");
+ assert(NULL == markWord::INFLATING().to_pointer(), "invariant");
+ obj->set_mark(markWord::INFLATING());
+ assert(NULL == obj->mark().to_pointer(), "invariant");
}
};
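
ObjectSampleMarker's trick is to poison the mark word of every sample object to zero (the INFLATING sentinel), which is impossible for a live object at a safepoint, and to restore the saved originals on destruction. A minimal sketch of that save/poison/restore shape, with simplified stand-ins for oop and markWord:

#include <cstdint>
#include <utility>
#include <vector>

struct MarkWord { uintptr_t value; bool is_zero() const { return value == 0; } };
struct Object   { MarkWord mark; };

// Save each original header, poison the mark to zero (the INFLATING
// sentinel), and restore everything on destruction. Only safe because the
// whole reachability search runs inside a safepoint.
class SampleMarker {
  std::vector<std::pair<Object*, MarkWord> > _saved;
public:
  void mark(Object* obj) {
    _saved.push_back(std::make_pair(obj, obj->mark)); // remember the original
    obj->mark = MarkWord{0};  // "impossible" state while the world is stopped
  }
  static bool is_sample(const Object* obj) { return obj->mark.is_zero(); }
  ~SampleMarker() {
    for (size_t i = 0; i < _saved.size(); i++) {
      _saved[i].first->mark = _saved[i].second;       // restore original mark
    }
  }
};
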
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -111,7 +111,7 @@
traceid gc_root_id = 0;
const Edge* edge = NULL;
if (SafepointSynchronize::is_at_safepoint()) {
- edge = (const Edge*)(*object_addr)->mark();
+ edge = (const Edge*)(*object_addr)->mark().to_pointer();
}
if (edge == NULL) {
// In order to dump out a representation of the event
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -27,42 +27,42 @@
#include "jfr/leakprofiler/utilities/saveRestore.hpp"
#include "oops/oop.inline.hpp"
-MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {}
+MarkWordContext::MarkWordContext() : _obj(NULL), _mark_word(markWord::zero()) {}
-MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) {
- assert(_obj->mark() == _mark_oop, "invariant");
+MarkWordContext::MarkWordContext(const oop obj) : _obj(obj), _mark_word(obj->mark()) {
+ assert(_obj->mark() == _mark_word, "invariant");
// now we will "poison" the mark word of the object
// to the intermediate monitor INFLATING state.
// This is an "impossible" state during a safepoint,
// hence we will use it to quickly identify objects
// during the reachability search from gc roots.
- assert(NULL == markOopDesc::INFLATING(), "invariant");
- _obj->set_mark(markOopDesc::INFLATING());
- assert(NULL == obj->mark(), "invariant");
+ assert(markWord::zero() == markWord::INFLATING(), "invariant");
+ _obj->set_mark(markWord::INFLATING());
+ assert(markWord::zero() == obj->mark(), "invariant");
}
-MarkOopContext::~MarkOopContext() {
+MarkWordContext::~MarkWordContext() {
if (_obj != NULL) {
- _obj->set_mark(_mark_oop);
- assert(_obj->mark() == _mark_oop, "invariant");
+ _obj->set_mark(_mark_word);
+ assert(_obj->mark() == _mark_word, "invariant");
}
}
-MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) {
- swap(const_cast<MarkOopContext&>(rhs));
+MarkWordContext::MarkWordContext(const MarkWordContext& rhs) : _obj(NULL), _mark_word(markWord::zero()) {
+ swap(const_cast<MarkWordContext&>(rhs));
}
-void MarkOopContext::operator=(MarkOopContext rhs) {
+void MarkWordContext::operator=(MarkWordContext rhs) {
swap(rhs);
}
-void MarkOopContext::swap(MarkOopContext& rhs) {
+void MarkWordContext::swap(MarkWordContext& rhs) {
oop temp_obj = rhs._obj;
- markOop temp_mark_oop = rhs._mark_oop;
+ markWord temp_mark_word = rhs._mark_word;
rhs._obj = _obj;
- rhs._mark_oop = _mark_oop;
+ rhs._mark_word = _mark_word;
_obj = temp_obj;
- _mark_oop = temp_mark_oop;
+ _mark_word = temp_mark_word;
}
CLDClaimContext::CLDClaimContext() : _cld(NULL) {}
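
MarkWordContext's copy machinery is the classic copy-and-swap idiom: operator= takes its argument by value, swaps state with it, and lets the temporary's destructor release whatever *this held before; the copy constructor likewise transfers rather than duplicates, so at most one context ever restores a given mark. A minimal sketch of the idiom with a generic placeholder resource:

#include <utility>

class Context {
  int* _resource;  // stands in for the saved (oop, markWord) state
  void swap(Context& rhs) { std::swap(_resource, rhs._resource); }
public:
  Context() : _resource(nullptr) {}
  explicit Context(int* r) : _resource(r) {}
  // Transfer, not duplicate: mirrors MarkWordContext's const_cast + swap.
  Context(const Context& rhs) : _resource(nullptr) {
    swap(const_cast<Context&>(rhs));
  }
  // Pass-by-value makes the copy; swapping hands our old state to the
  // temporary, which releases it when it goes out of scope.
  void operator=(Context rhs) { swap(rhs); }
  ~Context() { delete _resource; }
};
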
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -78,20 +78,20 @@
* The destructor will restore the original mark word.
*/
-class MarkOopContext {
+class MarkWordContext {
private:
oop _obj;
- markOop _mark_oop;
- void swap(MarkOopContext& rhs);
+ markWord _mark_word;
+ void swap(MarkWordContext& rhs);
public:
- MarkOopContext();
- MarkOopContext(const oop obj);
- MarkOopContext(const MarkOopContext& rhs);
- void operator=(MarkOopContext rhs);
- ~MarkOopContext();
+ MarkWordContext();
+ MarkWordContext(const oop obj);
+ MarkWordContext(const MarkWordContext& rhs);
+ void operator=(MarkWordContext rhs);
+ ~MarkWordContext();
};
-typedef SaveRestore<oop, ContextStore<oop, MarkOopContext> > SaveRestoreMarkOops;
+typedef SaveRestore<oop, ContextStore<oop, MarkWordContext> > SaveRestoreMarkWords;
class ClassLoaderData;
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -385,8 +385,8 @@
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
- markOop mark = obj->mark();
- TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(mark), p2i(lock));
+ markWord mark = obj->mark();
+ TRACE_jvmci_3("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, mark.value(), p2i(lock));
tty->flush();
}
if (PrintBiasedLockingStatistics) {
@@ -435,7 +435,7 @@
IF_TRACE_jvmci_3 {
char type[O_BUFLEN];
obj->klass()->name()->as_C_string(type, O_BUFLEN);
- TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, p2i(obj->mark()), p2i(lock));
+ TRACE_jvmci_3("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), p2i(obj), type, obj->mark().value(), p2i(lock));
tty->flush();
}
JRT_END
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -103,7 +103,7 @@
nonstatic_field(Array<Klass*>, _length, int) \
nonstatic_field(Array<Klass*>, _data[0], Klass*) \
\
- volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
+ volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \
\
static_field(CodeCache, _low_bound, address) \
static_field(CodeCache, _high_bound, address) \
@@ -194,7 +194,7 @@
nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
- nonstatic_field(Klass, _prototype_header, markOop) \
+ nonstatic_field(Klass, _prototype_header, markWord) \
nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \
@@ -257,7 +257,7 @@
volatile_nonstatic_field(ObjectMonitor, _EntryList, ObjectWaiter*) \
volatile_nonstatic_field(ObjectMonitor, _succ, Thread*) \
\
- volatile_nonstatic_field(oopDesc, _mark, markOop) \
+ volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
\
static_field(os, _polling_page, address) \
@@ -563,7 +563,7 @@
declare_constant(Klass::_lh_array_tag_type_value) \
declare_constant(Klass::_lh_array_tag_obj_value) \
\
- declare_constant(markOopDesc::no_hash) \
+ declare_constant(markWord::no_hash) \
\
declare_constant(Method::_caller_sensitive) \
declare_constant(Method::_force_inline) \
@@ -595,19 +595,19 @@
declare_constant(InvocationCounter::count_increment) \
declare_constant(InvocationCounter::count_shift) \
\
- declare_constant(markOopDesc::hash_shift) \
+ declare_constant(markWord::hash_shift) \
\
- declare_constant(markOopDesc::biased_lock_mask_in_place) \
- declare_constant(markOopDesc::age_mask_in_place) \
- declare_constant(markOopDesc::epoch_mask_in_place) \
- declare_constant(markOopDesc::hash_mask) \
- declare_constant(markOopDesc::hash_mask_in_place) \
+ declare_constant(markWord::biased_lock_mask_in_place) \
+ declare_constant(markWord::age_mask_in_place) \
+ declare_constant(markWord::epoch_mask_in_place) \
+ declare_constant(markWord::hash_mask) \
+ declare_constant(markWord::hash_mask_in_place) \
\
- declare_constant(markOopDesc::unlocked_value) \
- declare_constant(markOopDesc::biased_lock_pattern) \
+ declare_constant(markWord::unlocked_value) \
+ declare_constant(markWord::biased_lock_pattern) \
\
- declare_constant(markOopDesc::no_hash_in_place) \
- declare_constant(markOopDesc::no_lock_in_place) \
+ declare_constant(markWord::no_hash_in_place) \
+ declare_constant(markWord::no_lock_in_place) \
#define VM_ADDRESSES(declare_address, declare_preprocessor_address, declare_function) \
declare_function(SharedRuntime::register_finalizer) \
--- a/src/hotspot/share/memory/heapShared.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/memory/heapShared.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -98,7 +98,7 @@
}
unsigned HeapShared::oop_hash(oop const& p) {
- assert(!p->mark()->has_bias_pattern(),
+ assert(!p->mark().has_bias_pattern(),
"this object should never have been locked"); // so identity_hash won't safepoin
unsigned hash = (unsigned)p->identity_hash();
return hash;
--- a/src/hotspot/share/memory/universe.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/memory/universe.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1185,12 +1185,12 @@
}
uintptr_t Universe::verify_mark_mask() {
- return markOopDesc::lock_mask_in_place;
+ return markWord::lock_mask_in_place;
}
uintptr_t Universe::verify_mark_bits() {
intptr_t mask = verify_mark_mask();
- intptr_t bits = (intptr_t)markOopDesc::prototype();
+ intptr_t bits = (intptr_t)markWord::prototype().value();
assert((bits & ~mask) == 0, "no stray header bits");
return bits;
}
--- a/src/hotspot/share/memory/virtualspace.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/memory/virtualspace.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -623,9 +623,9 @@
initialize(size, alignment, large, NULL, false);
}
- assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+ assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
- assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+ assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
if (base() != NULL) {
--- a/src/hotspot/share/oops/arrayOop.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/arrayOop.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -34,7 +34,7 @@
// The layout of array Oops is:
//
-// markOop
+// markWord
// Klass* // 32 bits if compressed but declared 64 in LP64.
// length // shares klass memory or allocated after declared fields.
--- a/src/hotspot/share/oops/klass.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/klass.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -195,7 +195,7 @@
// should be NULL before setting it.
Klass::Klass(KlassID id) : _id(id),
_java_mirror(NULL),
- _prototype_header(markOopDesc::prototype()),
+ _prototype_header(markWord::prototype()),
_shared_class_path_index(-1) {
CDS_ONLY(_shared_class_flags = 0;)
CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;)
@@ -744,9 +744,9 @@
if (WizardMode) {
// print header
- obj->mark()->print_on(st);
+ obj->mark().print_on(st);
st->cr();
- st->print(BULLET"prototype_header: " INTPTR_FORMAT, p2i(_prototype_header));
+ st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value());
st->cr();
}
--- a/src/hotspot/share/oops/klass.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/klass.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -28,6 +28,7 @@
#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
+#include "oops/markOop.hpp"
#include "oops/metadata.hpp"
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
@@ -159,7 +160,7 @@
// Biased locking implementation and statistics
// (the 64-bit chunk goes first, to avoid some fragmentation)
jlong _last_biased_lock_bulk_revocation_time;
- markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
+ markWord _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
// vtable length
@@ -619,9 +620,9 @@
// Biased locking support
// Note: the prototype header is always set up to be at least the
- // prototype markOop. If biased locking is enabled it may further be
+ // prototype markWord. If biased locking is enabled it may further be
// biasable and have an epoch.
- markOop prototype_header() const { return _prototype_header; }
+ markWord prototype_header() const { return _prototype_header; }
// NOTE: once instances of this klass are floating around in the
// system, this header must only be updated at a safepoint.
// NOTE 2: currently we only ever set the prototype header to the
@@ -630,7 +631,7 @@
// wanting to reduce the initial scope of this optimization. There
// are potential problems in setting the bias pattern for
// JVM-internal oops.
- inline void set_prototype_header(markOop header);
+ inline void set_prototype_header(markWord header);
static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); }
int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }
--- a/src/hotspot/share/oops/klass.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/klass.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -29,8 +29,8 @@
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
-inline void Klass::set_prototype_header(markOop header) {
- assert(!header->has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
+inline void Klass::set_prototype_header(markWord header) {
+ assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
_prototype_header = header;
}
--- a/src/hotspot/share/oops/markOop.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/markOop.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -27,7 +27,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/objectMonitor.hpp"
-void markOopDesc::print_on(outputStream* st) const {
+void markWord::print_on(outputStream* st) const {
if (is_marked()) { // last bits = 11
st->print(" marked(" INTPTR_FORMAT ")", value());
} else if (has_monitor()) { // last bits = 10
--- a/src/hotspot/share/oops/markOop.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/markOop.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -25,12 +25,11 @@
#ifndef SHARE_OOPS_MARKOOP_HPP
#define SHARE_OOPS_MARKOOP_HPP
-#include "oops/oop.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "oops/oopsHierarchy.hpp"
-// The markOop describes the header of an object.
-//
-// Note that the mark is not a real oop but just a word.
-// It is placed in the oop hierarchy for historical reasons.
+// The markWord describes the header of an object.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
@@ -101,12 +100,35 @@
class ObjectMonitor;
class JavaThread;
-class markOopDesc: public oopDesc {
+class markWord {
private:
- // Conversion
- uintptr_t value() const { return (uintptr_t) this; }
+ uintptr_t _value;
public:
+ explicit markWord(uintptr_t value) : _value(value) {}
+
+ markWord() { /* uninitialized */}
+
+ // It is critical for performance that this class be trivially
+ // destructible, copyable, and assignable.
+
+ static markWord from_pointer(void* ptr) {
+ return markWord((uintptr_t)ptr);
+ }
+ void* to_pointer() const {
+ return (void*)_value;
+ }
+
+ bool operator==(const markWord& other) const {
+ return _value == other._value;
+ }
+ bool operator!=(const markWord& other) const {
+ return !operator==(other);
+ }
+
+ // Conversion
+ uintptr_t value() const { return _value; }
+
// Constants
enum { age_bits = 4,
lock_bits = 2,
@@ -164,6 +186,9 @@
enum { max_bias_epoch = epoch_mask };
+ // Creates a markWord with all bits set to zero.
+ static markWord zero() { return markWord(uintptr_t(0)); }
+
// Biased Locking accessors.
// These must be checked by all code which calls into the
// ObjectSynchronizer and other code. The biasing is not understood
@@ -189,17 +214,17 @@
assert(has_bias_pattern(), "should not call this otherwise");
return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
}
- markOop set_bias_epoch(int epoch) {
+ markWord set_bias_epoch(int epoch) {
assert(has_bias_pattern(), "should not call this otherwise");
assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
- return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
+ return markWord(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
}
- markOop incr_bias_epoch() {
+ markWord incr_bias_epoch() {
return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
}
// Prototype mark for initialization
- static markOop biased_locking_prototype() {
- return markOop( biased_lock_pattern );
+ static markWord biased_locking_prototype() {
+ return markWord( biased_lock_pattern );
}
// lock accessors (note that these assume lock_shift == 0)
@@ -214,7 +239,7 @@
}
bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
- // Special temporary state of the markOop while being inflated.
+ // Special temporary state of the markWord while being inflated.
// Code that looks at the mark outside a lock needs to take this into account.
bool is_being_inflated() const { return (value() == 0); }
@@ -224,7 +249,7 @@
// check for and avoid overwriting a 0 value installed by some
// other thread. (They should spin or block instead. The 0 value
// is transient and *should* be short-lived).
- static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress
+ static markWord INFLATING() { return zero(); } // inflate-in-progress
// Should this header be preserved during GC?
inline bool must_be_preserved(oop obj_containing_mark) const;
@@ -259,9 +284,9 @@
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
- // They must get updated if markOop layout get changed.
- markOop set_unlocked() const {
- return markOop(value() | unlocked_value);
+ // They must get updated if the markWord layout gets changed.
+ markWord set_unlocked() const {
+ return markWord(value() | unlocked_value);
}
bool has_locker() const {
return ((value() & lock_mask_in_place) == locked_value);
@@ -281,56 +306,56 @@
bool has_displaced_mark_helper() const {
return ((value() & unlocked_value) == 0);
}
- markOop displaced_mark_helper() const {
+ markWord displaced_mark_helper() const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
- return *(markOop*)ptr;
+ return *(markWord*)ptr;
}
- void set_displaced_mark_helper(markOop m) const {
+ void set_displaced_mark_helper(markWord m) const {
assert(has_displaced_mark_helper(), "check");
intptr_t ptr = (value() & ~monitor_value);
- *(markOop*)ptr = m;
+ ((markWord*)ptr)->_value = m._value;
}
- markOop copy_set_hash(intptr_t hash) const {
+ markWord copy_set_hash(intptr_t hash) const {
intptr_t tmp = value() & (~hash_mask_in_place);
tmp |= ((hash & hash_mask) << hash_shift);
- return (markOop)tmp;
+ return markWord(tmp);
}
// It is only used when stored into a BasicLock, as the
// indicator that the lock is using a heavyweight monitor
- static markOop unused_mark() {
- return (markOop) marked_value;
+ static markWord unused_mark() {
+ return markWord(marked_value);
}
- // the following two functions create the markOop to be
+ // the following two functions create the markWord to be
// stored into the object header; it encodes monitor info
- static markOop encode(BasicLock* lock) {
- return (markOop) lock;
+ static markWord encode(BasicLock* lock) {
+ return from_pointer(lock);
}
- static markOop encode(ObjectMonitor* monitor) {
+ static markWord encode(ObjectMonitor* monitor) {
intptr_t tmp = (intptr_t) monitor;
- return (markOop) (tmp | monitor_value);
+ return markWord(tmp | monitor_value);
}
- static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
+ static markWord encode(JavaThread* thread, uint age, int bias_epoch) {
intptr_t tmp = (intptr_t) thread;
assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
assert(age <= max_age, "age too large");
assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
- return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
+ return markWord(tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
}
// used to encode pointers during GC
- markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
+ markWord clear_lock_bits() { return markWord(value() & ~lock_mask_in_place); }
// age operations
- markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
- markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
+ markWord set_marked() { return markWord((value() & ~lock_mask_in_place) | marked_value); }
+ markWord set_unmarked() { return markWord((value() & ~lock_mask_in_place) | unlocked_value); }
- uint age() const { return mask_bits(value() >> age_shift, age_mask); }
- markOop set_age(uint v) const {
+ uint age() const { return mask_bits(value() >> age_shift, age_mask); }
+ markWord set_age(uint v) const {
assert((v & ~age_mask) == 0, "shouldn't overflow age field");
- return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
+ return markWord((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
}
- markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
+ markWord incr_age() const { return age() == max_age ? markWord(_value) : set_age(age() + 1); }
// hash operations
intptr_t hash() const {
@@ -342,24 +367,24 @@
}
// Prototype mark for initialization
- static markOop prototype() {
- return markOop( no_hash_in_place | no_lock_in_place );
+ static markWord prototype() {
+ return markWord( no_hash_in_place | no_lock_in_place );
}
// Helper function for restoration of unmarked mark words during GC
- static inline markOop prototype_for_object(oop obj);
+ static inline markWord prototype_for_object(oop obj);
// Debugging
void print_on(outputStream* st) const;
// Prepare address of oop for placement into mark
- inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
+ inline static markWord encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }
// Recover address of oop from encoded form used in mark
- inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+ inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
- // These markOops indicate cms free chunk blocks and not objects.
- // In 64 bit, the markOop is set to distinguish them from oops.
+ // These markWords indicate cms free chunk blocks and not objects.
+ // In 64 bit, the markWord is set to distinguish them from oops.
// These are defined in 32 bit mode for vmStructs.
const static uintptr_t cms_free_chunk_pattern = 0x1;
@@ -374,9 +399,9 @@
(address_word)size_mask << size_shift;
#ifdef _LP64
- static markOop cms_free_prototype() {
- return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
- ((cms_free_chunk_pattern & cms_mask) << cms_shift));
+ static markWord cms_free_prototype() {
+ return markWord(((intptr_t)prototype().value() & ~cms_mask_in_place) |
+ ((cms_free_chunk_pattern & cms_mask) << cms_shift));
}
uintptr_t cms_encoding() const {
return mask_bits(value() >> cms_shift, cms_mask);
@@ -387,12 +412,22 @@
}
size_t get_size() const { return (size_t)(value() >> size_shift); }
- static markOop set_size_and_free(size_t size) {
+ static markWord set_size_and_free(size_t size) {
assert((size & ~size_mask) == 0, "shouldn't overflow size field");
- return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
- (((intptr_t)size & size_mask) << size_shift));
+ return markWord(((intptr_t)cms_free_prototype().value() & ~size_mask_in_place) |
+ (((intptr_t)size & size_mask) << size_shift));
}
#endif // _LP64
};
+// Support atomic operations.
+template<>
+struct PrimitiveConversions::Translate<markWord> : public TrueType {
+ typedef markWord Value;
+ typedef uintptr_t Decayed;
+
+ static Decayed decay(const Value& x) { return x.value(); }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
#endif // SHARE_OOPS_MARKOOP_HPP
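
The Translate specialization at the end of the header is what lets HotSpot's Atomic templates keep operating on the mark now that it is a class rather than a pointer: the templates decay the wrapper to its underlying uintptr_t, perform the raw atomic operation, and recover a markWord from the result. A hedged standalone analogue of that decay/recover round trip over std::atomic:

#include <atomic>
#include <cstdint>

class MarkWord {
  uintptr_t _value;
public:
  explicit MarkWord(uintptr_t v) : _value(v) {}
  uintptr_t value() const { return _value; }
  bool operator==(const MarkWord& o) const { return _value == o._value; }
};

// Mirror of what Translate<markWord> enables: decay the wrapper to its
// integral representation, do the raw CAS, recover a MarkWord from whatever
// value was observed (cmpxchg returns the old contents either way).
MarkWord cas_mark(std::atomic<uintptr_t>& cell,
                  MarkWord new_mark, MarkWord old_mark) {
  uintptr_t expected = old_mark.value();                    // decay
  cell.compare_exchange_strong(expected, new_mark.value()); // raw CAS
  return MarkWord(expected);                                // recover
}

This only works because markWord is trivially copyable and exactly one word wide, which is why the class comment insists on trivial destructibility and assignability.
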
--- a/src/hotspot/share/oops/markOop.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/markOop.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -31,15 +31,15 @@
#include "runtime/globals.hpp"
// Should this header be preserved during GC (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
if (has_bias_pattern()) {
// Will reset bias at end of collection
// Mark words of biased and currently locked objects are preserved separately
return false;
}
- markOop prototype_header = prototype_for_object(obj_containing_mark);
- if (prototype_header->has_bias_pattern()) {
+ markWord prototype_header = prototype_for_object(obj_containing_mark);
+ if (prototype_header.has_bias_pattern()) {
// Individual instance which has its bias revoked; must return
// true for correctness
return true;
@@ -48,7 +48,7 @@
}
// Should this header be preserved during GC?
-inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias(obj_containing_mark);
@@ -56,7 +56,7 @@
// Should this header be preserved in the case of a promotion failure
// during scavenge (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// We don't explicitly save off the mark words of biased and
// currently-locked objects during scavenges, so if during a
@@ -68,7 +68,7 @@
// BiasedLocking::preserve_marks() / restore_marks() in the middle
// of a scavenge when a promotion failure has first been detected.
if (has_bias_pattern() ||
- prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
+ prototype_for_object(obj_containing_mark).has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@@ -76,7 +76,7 @@
// Should this header be preserved in the case of a promotion failure
// during scavenge?
-inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markWord::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
@@ -85,11 +85,11 @@
// Same as must_be_preserved_with_bias_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markWord::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
assert(UseBiasedLocking, "unexpected");
// CMS scavenges preserve mark words in similar fashion to promotion failures; see above
if (has_bias_pattern() ||
- klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
+ klass_of_obj_containing_mark->prototype_header().has_bias_pattern()) {
return true;
}
return (!is_unlocked() || !has_no_hash());
@@ -97,16 +97,16 @@
// Same as must_be_preserved_for_promotion_failure() except that
// it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markWord::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
if (!UseBiasedLocking)
return (!is_unlocked() || !has_no_hash());
return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
}
-inline markOop markOopDesc::prototype_for_object(oop obj) {
+inline markWord markWord::prototype_for_object(oop obj) {
#ifdef ASSERT
- markOop prototype_header = obj->klass()->prototype_header();
- assert(prototype_header == prototype() || prototype_header->has_bias_pattern(), "corrupt prototype header");
+ markWord prototype_header = obj->klass()->prototype_header();
+ assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
#endif
return obj->klass()->prototype_header();
}
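
The chain of must_be_preserved_* predicates above all bottoms out in the same test: a header only needs saving across GC if it cannot be rebuilt from the prototype, i.e. it is locked or carries an installed identity hash (the bias-pattern cases are layered on top). A standalone sketch of that final test, with illustrative shift and width for the hash field:

#include <cstdint>

bool is_unlocked(uintptr_t mark) { return (mark & 0x3) == 0x1; } // 0b01 = neutral
bool has_no_hash(uintptr_t mark) {
  return ((mark >> 8) & 0x7FFFFFFF) == 0;  // illustrative 31-bit hash at bit 8
}

// Without biased locking, only non-prototype headers are worth preserving.
bool must_be_preserved(uintptr_t mark) {
  return !is_unlocked(mark) || !has_no_hash(mark);
}
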
--- a/src/hotspot/share/oops/method.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/method.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -2067,7 +2067,7 @@
#endif // PRODUCT
};
-// Something that can't be mistaken for an address or a markOop
+// Something that can't be mistaken for an address or a markWord
Method* const JNIMethodBlock::_free_method = (Method*)55;
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(NULL) {
--- a/src/hotspot/share/oops/objArrayOop.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/objArrayOop.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -37,7 +37,7 @@
friend class ObjArrayKlass;
friend class Runtime1;
friend class psPromotionManager;
- friend class CSetMarkOopClosure;
+ friend class CSetMarkWordClosure;
friend class G1ParScanPartialArrayClosure;
template <class T> T* obj_at_addr(int index) const;
--- a/src/hotspot/share/oops/oop.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/oop.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -105,14 +105,14 @@
return false;
}
- // Header verification: the mark is typically non-NULL. If we're
- // at a safepoint, it must not be null.
+ // Header verification: the mark is typically non-zero. If we're
+ // at a safepoint, it must not be zero.
// Outside of a safepoint, the header could be changing (for example,
// another thread could be inflating a lock on this object).
if (ignore_mark_word) {
return true;
}
- if (obj->mark_raw() != NULL) {
+ if (obj->mark_raw().value() != 0) {
return true;
}
return !SafepointSynchronize::is_at_safepoint();
--- a/src/hotspot/share/oops/oop.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/oop.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -28,6 +28,7 @@
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
+#include "oops/markOop.hpp"
#include "oops/metadata.hpp"
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
@@ -55,24 +56,24 @@
friend class VMStructs;
friend class JVMCIVMStructs;
private:
- volatile markOop _mark;
+ volatile markWord _mark;
union _metadata {
Klass* _klass;
narrowKlass _compressed_klass;
} _metadata;
public:
- inline markOop mark() const;
- inline markOop mark_raw() const;
- inline markOop* mark_addr_raw() const;
+ inline markWord mark() const;
+ inline markWord mark_raw() const;
+ inline markWord* mark_addr_raw() const;
- inline void set_mark(volatile markOop m);
- inline void set_mark_raw(volatile markOop m);
- static inline void set_mark_raw(HeapWord* mem, markOop m);
+ inline void set_mark(volatile markWord m);
+ inline void set_mark_raw(volatile markWord m);
+ static inline void set_mark_raw(HeapWord* mem, markWord m);
- inline void release_set_mark(markOop m);
- inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
- inline markOop cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order = memory_order_conservative);
+ inline void release_set_mark(markWord m);
+ inline markWord cas_set_mark(markWord new_mark, markWord old_mark);
+ inline markWord cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order = memory_order_conservative);
// Used only to re-initialize the mark word (e.g., of promoted
// objects during a GC) -- requires a valid klass pointer
@@ -266,13 +267,13 @@
inline bool is_forwarded() const;
inline void forward_to(oop p);
- inline bool cas_forward_to(oop p, markOop compare, atomic_memory_order order = memory_order_conservative);
+ inline bool cas_forward_to(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
// Like "forward_to", but inserts the forwarding pointer atomically.
// Exactly one thread succeeds in inserting the forwarding pointer, and
// this call returns "NULL" for that thread; any other thread has the
// value of the forwarding pointer returned and does not modify "this".
- inline oop forward_to_atomic(oop p, markOop compare, atomic_memory_order order = memory_order_conservative);
+ inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
inline oop forwardee() const;
inline oop forwardee_acquire() const;
@@ -308,9 +309,9 @@
intptr_t slow_identity_hash();
// marks are forwarded to stack when object is locked
- inline bool has_displaced_mark_raw() const;
- inline markOop displaced_mark_raw() const;
- inline void set_displaced_mark_raw(markOop m);
+ inline bool has_displaced_mark_raw() const;
+ inline markWord displaced_mark_raw() const;
+ inline void set_displaced_mark_raw(markWord m);
static bool has_klass_gap();
--- a/src/hotspot/share/oops/oop.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -43,48 +43,50 @@
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
-markOop oopDesc::mark() const {
- return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+markWord oopDesc::mark() const {
+ uintptr_t v = HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+ return markWord(v);
}
-markOop oopDesc::mark_raw() const {
- return _mark;
+markWord oopDesc::mark_raw() const {
+ return Atomic::load(&_mark);
}
-markOop* oopDesc::mark_addr_raw() const {
- return (markOop*) &_mark;
+markWord* oopDesc::mark_addr_raw() const {
+ return (markWord*) &_mark;
}
-void oopDesc::set_mark(volatile markOop m) {
- HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+void oopDesc::set_mark(markWord m) {
+ HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}
-void oopDesc::set_mark_raw(volatile markOop m) {
- _mark = m;
+void oopDesc::set_mark_raw(markWord m) {
+ Atomic::store(m, &_mark);
}
-void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
- *(markOop*)(((char*)mem) + mark_offset_in_bytes()) = m;
+void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
+ *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}
-void oopDesc::release_set_mark(markOop m) {
- HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+void oopDesc::release_set_mark(markWord m) {
+ HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}
-markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
- return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
+markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
+ uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
+ return markWord(v);
}
-markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
+markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
}
void oopDesc::init_mark() {
- set_mark(markOopDesc::prototype_for_object(this));
+ set_mark(markWord::prototype_for_object(this));
}
void oopDesc::init_mark_raw() {
- set_mark_raw(markOopDesc::prototype_for_object(this));
+ set_mark_raw(markWord::prototype_for_object(this));
}
Klass* oopDesc::klass() const {
@@ -319,31 +321,31 @@
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
bool oopDesc::is_locked() const {
- return mark()->is_locked();
+ return mark().is_locked();
}
bool oopDesc::is_unlocked() const {
- return mark()->is_unlocked();
+ return mark().is_unlocked();
}
bool oopDesc::has_bias_pattern() const {
- return mark()->has_bias_pattern();
+ return mark().has_bias_pattern();
}
bool oopDesc::has_bias_pattern_raw() const {
- return mark_raw()->has_bias_pattern();
+ return mark_raw().has_bias_pattern();
}
// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
- return mark_raw()->is_marked();
+ return mark_raw().is_marked();
}
// Used by scavengers
bool oopDesc::is_forwarded() const {
// The extra heap check is needed since the obj might be locked, in which case the
// mark would point to a stack location and have the sentinel bit cleared
- return mark_raw()->is_marked();
+ return mark_raw().is_marked();
}
// Used by scavengers
@@ -355,36 +357,36 @@
assert(!is_archived_object(oop(this)) &&
!is_archived_object(p),
"forwarding archive object");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversible");
set_mark_raw(m);
}
// Used by parallel scavengers
-bool oopDesc::cas_forward_to(oop p, markOop compare, atomic_memory_order order) {
+bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
assert(check_obj_alignment(p),
"forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversible");
return cas_set_mark_raw(m, compare, order) == compare;
}
-oop oopDesc::forward_to_atomic(oop p, markOop compare, atomic_memory_order order) {
+oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
// CMS forwards some non-heap value into the mark oop to reserve oops during
// promotion, so the next two asserts do not hold.
assert(UseConcMarkSweepGC || check_obj_alignment(p),
"forwarding to something not aligned");
assert(UseConcMarkSweepGC || Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
- markOop m = markOopDesc::encode_pointer_as_mark(p);
- assert(m->decode_pointer() == p, "encoding must be reversable");
- markOop old_mark = cas_set_mark_raw(m, compare, order);
+ markWord m = markWord::encode_pointer_as_mark(p);
+ assert(m.decode_pointer() == p, "encoding must be reversible");
+ markWord old_mark = cas_set_mark_raw(m, compare, order);
if (old_mark == compare) {
return NULL;
} else {
- return (oop)old_mark->decode_pointer();
+ return (oop)old_mark.decode_pointer();
}
}
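For reference, the encode/decode pair used by these scavenger paths round-trips the forwardee through the mark's low lock bits; a sketch of the invariant the asserts check, assuming the marked_value and lock_mask_in_place constants from markOop.hpp:

  // Encode: stash the forwardee under the "marked" low-bit pattern.
  markWord m = markWord(cast_from_oop<uintptr_t>(p) | markWord::marked_value);
  // Decode: clear the lock bits again to recover the aligned pointer.
  oop q = cast_to_oop(m.value() & ~(uintptr_t)markWord::lock_mask_in_place);
  assert(q == p, "encoding must be reversible");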
@@ -392,33 +394,32 @@
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
- return (oop) mark_raw()->decode_pointer();
+ return (oop) mark_raw().decode_pointer();
}
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
- markOop m = OrderAccess::load_acquire(&_mark);
- return (oop) m->decode_pointer();
+ return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
}
// The following method needs to be MT safe.
uint oopDesc::age() const {
assert(!is_forwarded(), "Attempt to read age from forwarded mark");
if (has_displaced_mark_raw()) {
- return displaced_mark_raw()->age();
+ return displaced_mark_raw().age();
} else {
- return mark_raw()->age();
+ return mark_raw().age();
}
}
void oopDesc::incr_age() {
assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
if (has_displaced_mark_raw()) {
- set_displaced_mark_raw(displaced_mark_raw()->incr_age());
+ set_displaced_mark_raw(displaced_mark_raw().incr_age());
} else {
- set_mark_raw(mark_raw()->incr_age());
+ set_mark_raw(mark_raw().incr_age());
}
}
@@ -460,26 +461,26 @@
intptr_t oopDesc::identity_hash() {
// Fast case; if the object is unlocked and the hash value is set, no locking is needed
// Note: The mark must be read into local variable to avoid concurrent updates.
- markOop mrk = mark();
- if (mrk->is_unlocked() && !mrk->has_no_hash()) {
- return mrk->hash();
- } else if (mrk->is_marked()) {
- return mrk->hash();
+ markWord mrk = mark();
+ if (mrk.is_unlocked() && !mrk.has_no_hash()) {
+ return mrk.hash();
+ } else if (mrk.is_marked()) {
+ return mrk.hash();
} else {
return slow_identity_hash();
}
}
bool oopDesc::has_displaced_mark_raw() const {
- return mark_raw()->has_displaced_mark_helper();
+ return mark_raw().has_displaced_mark_helper();
}
-markOop oopDesc::displaced_mark_raw() const {
- return mark_raw()->displaced_mark_helper();
+markWord oopDesc::displaced_mark_raw() const {
+ return mark_raw().displaced_mark_helper();
}
-void oopDesc::set_displaced_mark_raw(markOop m) {
- mark_raw()->set_displaced_mark_helper(m);
+void oopDesc::set_displaced_mark_raw(markWord m) {
+ mark_raw().set_displaced_mark_helper(m);
}
#endif // SHARE_OOPS_OOP_INLINE_HPP
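The pattern in this file is uniform: the header is now a trivially-copyable value carrier rather than a pointer, so raw accesses go through Atomic::load/store and comparisons use value equality. A reduced sketch of the carrier these call sites assume (the full class, with the lock/hash/age accessors, lives in markOop.hpp):

  class markWord {
    uintptr_t _value;                              // the raw header bits
   public:
    explicit markWord(uintptr_t value) : _value(value) {}
    static markWord zero()            { return markWord(uintptr_t(0)); }
    uintptr_t value() const           { return _value; }
    bool operator==(markWord o) const { return _value == o._value; }
    bool operator!=(markWord o) const { return !operator==(o); }
  };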
--- a/src/hotspot/share/oops/oopsHierarchy.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -39,15 +39,14 @@
typedef juint narrowKlass;
typedef void* OopOrNarrowOopStar;
-typedef class markOopDesc* markOop;
#ifndef CHECK_UNHANDLED_OOPS
-typedef class oopDesc* oop;
+typedef class oopDesc* oop;
typedef class instanceOopDesc* instanceOop;
-typedef class arrayOopDesc* arrayOop;
+typedef class arrayOopDesc* arrayOop;
typedef class objArrayOopDesc* objArrayOop;
-typedef class typeArrayOopDesc* typeArrayOop;
+typedef class typeArrayOopDesc* typeArrayOop;
#else
@@ -82,7 +81,6 @@
void register_oop();
void unregister_oop();
- // friend class markOop;
public:
void set_obj(const void* p) {
raw_set_obj(p);
@@ -121,7 +119,6 @@
operator oopDesc* () const volatile { return obj(); }
operator intptr_t* () const { return (intptr_t*)obj(); }
operator PromotedObject* () const { return (PromotedObject*)obj(); }
- operator markOop () const volatile { return markOop(obj()); }
operator address () const { return (address)obj(); }
// from javaCalls.cpp
--- a/src/hotspot/share/opto/library_call.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/opto/library_call.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3955,9 +3955,9 @@
Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
// Test the header to see if it is unlocked.
- Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
+ Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
- Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
+ Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
@@ -3967,8 +3967,8 @@
// We depend on hash_mask being at most 32 bits and avoid the use of
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
- Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
- Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
+ Node *hash_mask = _gvn.intcon(markWord::hash_mask);
+ Node *hash_shift = _gvn.intcon(markWord::hash_shift);
Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
// This hack lets the hash bits live anywhere in the mark object now, as long
// as the shift drops the relevant bits into the low 32 bits. Note that
@@ -3977,7 +3977,7 @@
hshifted_header = ConvX2I(hshifted_header);
Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
- Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
+ Node *no_hash_val = _gvn.intcon(markWord::no_hash);
Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
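The ideal-graph nodes above implement an ordinary bit-field read; stated as plain C++ over the same constants, the fast hash probe is roughly:

  // Sketch only: return the installed identity hash, or no_hash (0) if the
  // header is locked/biased or the hash has not been assigned yet.
  static intptr_t fast_identity_hash(oop obj) {
    uintptr_t header = obj->mark().value();
    if ((header & markWord::biased_lock_mask_in_place) == markWord::unlocked_value) {
      return (intptr_t)((header >> markWord::hash_shift) & markWord::hash_mask);
    }
    return markWord::no_hash;             // caller falls back to the slow path
  }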
--- a/src/hotspot/share/opto/macro.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/opto/macro.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1638,7 +1638,7 @@
if (UseBiasedLocking && (length == NULL)) {
mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
} else {
- mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
+ mark_node = makecon(TypeRawPtr::make((address)markWord::prototype().value()));
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
@@ -2196,8 +2196,8 @@
// Get fast path - mark word has the biased lock pattern.
ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
- markOopDesc::biased_lock_mask_in_place,
- markOopDesc::biased_lock_pattern, true);
+ markWord::biased_lock_mask_in_place,
+ markWord::biased_lock_pattern, true);
// fast_lock_region->in(1) is set to slow path.
fast_lock_mem_phi->init_req(1, mem);
@@ -2226,7 +2226,7 @@
// Get slow path - mark word does NOT match the value.
Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
- (~markOopDesc::age_mask_in_place), 0);
+ (~markWord::age_mask_in_place), 0);
// region->in(3) is set to fast path - the object is biased to the current thread.
mem_phi->init_req(3, mem);
@@ -2237,7 +2237,7 @@
// First, check biased pattern.
// Get fast path - _prototype_header has the same biased lock pattern.
ctrl = opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
- markOopDesc::biased_lock_mask_in_place, 0, true);
+ markWord::biased_lock_mask_in_place, 0, true);
not_biased_ctrl = fast_lock_region->in(2); // Slow path
// fast_lock_region->in(2) - the prototype header is no longer biased
@@ -2259,7 +2259,7 @@
// Get slow path - mark word does NOT match epoch bits.
Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node,
- markOopDesc::epoch_mask_in_place, 0);
+ markWord::epoch_mask_in_place, 0);
// The epoch of the current bias is not valid, attempt to rebias the object
// toward the current thread.
rebiased_region->init_req(2, epoch_ctrl);
@@ -2269,9 +2269,9 @@
// rebiased_region->in(1) is set to fast path.
// The epoch of the current bias is still valid but we know
// nothing about the owner; it might be set or it might be clear.
- Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place |
- markOopDesc::age_mask_in_place |
- markOopDesc::epoch_mask_in_place);
+ Node* cmask = MakeConX(markWord::biased_lock_mask_in_place |
+ markWord::age_mask_in_place |
+ markWord::epoch_mask_in_place);
Node* old = transform_later(new AndXNode(mark_node, cmask));
cast_thread = transform_later(new CastP2XNode(ctrl, thread));
Node* new_mark = transform_later(new OrXNode(cast_thread, old));
@@ -2386,8 +2386,8 @@
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
ctrl = opt_bits_test(ctrl, region, 3, mark_node,
- markOopDesc::biased_lock_mask_in_place,
- markOopDesc::biased_lock_pattern);
+ markWord::biased_lock_mask_in_place,
+ markWord::biased_lock_pattern);
} else {
region = new RegionNode(3);
// create a Phi for the memory state
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -965,18 +965,18 @@
address owner = NULL;
{
- markOop mark = hobj()->mark();
+ markWord mark = hobj()->mark();
- if (!mark->has_monitor()) {
+ if (!mark.has_monitor()) {
// this object has a lightweight monitor
- if (mark->has_locker()) {
- owner = (address)mark->locker(); // save the address of the Lock word
+ if (mark.has_locker()) {
+ owner = (address)mark.locker(); // save the address of the Lock word
}
// implied else: no owner
} else {
// this object has a heavyweight monitor
- mon = mark->monitor();
+ mon = mark.monitor();
// The owner field of a heavyweight monitor may be NULL for no
// owner, a JavaThread * or it may still be the address of the
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1628,8 +1628,8 @@
public:
void do_object(oop o) {
if (o != NULL) {
- markOop mark = o->mark();
- if (mark->is_marked()) {
+ markWord mark = o->mark();
+ if (mark.is_marked()) {
o->init_mark();
}
}
@@ -1641,7 +1641,7 @@
private:
// saved headers
static GrowableArray<oop>* _saved_oop_stack;
- static GrowableArray<markOop>* _saved_mark_stack;
+ static GrowableArray<markWord>* _saved_mark_stack;
static bool _needs_reset; // do we need to reset mark bits?
public:
@@ -1656,7 +1656,7 @@
};
GrowableArray<oop>* ObjectMarker::_saved_oop_stack = NULL;
-GrowableArray<markOop>* ObjectMarker::_saved_mark_stack = NULL;
+GrowableArray<markWord>* ObjectMarker::_saved_mark_stack = NULL;
bool ObjectMarker::_needs_reset = true; // need to reset mark bits by default
// initialize ObjectMarker - prepares for object marking
@@ -1667,7 +1667,7 @@
Universe::heap()->ensure_parsability(false); // no need to retire TLABs
// create stacks for interesting headers
- _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(4000, true);
+ _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(4000, true);
_saved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(4000, true);
if (UseBiasedLocking) {
@@ -1691,7 +1691,7 @@
// now restore the interesting headers
for (int i = 0; i < _saved_oop_stack->length(); i++) {
oop o = _saved_oop_stack->at(i);
- markOop mark = _saved_mark_stack->at(i);
+ markWord mark = _saved_mark_stack->at(i);
o->set_mark(mark);
}
@@ -1707,23 +1707,23 @@
// mark an object
inline void ObjectMarker::mark(oop o) {
assert(Universe::heap()->is_in(o), "sanity check");
- assert(!o->mark()->is_marked(), "should only mark an object once");
+ assert(!o->mark().is_marked(), "should only mark an object once");
// object's mark word
- markOop mark = o->mark();
-
- if (mark->must_be_preserved(o)) {
+ markWord mark = o->mark();
+
+ if (mark.must_be_preserved(o)) {
_saved_mark_stack->push(mark);
_saved_oop_stack->push(o);
}
// mark the object
- o->set_mark(markOopDesc::prototype()->set_marked());
+ o->set_mark(markWord::prototype().set_marked());
}
// return true if object is marked
inline bool ObjectMarker::visited(oop o) {
- return o->mark()->is_marked();
+ return o->mark().is_marked();
}
// Stack allocated class to help ensure that ObjectMarker is used
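ObjectMarker keeps the two stacks strictly parallel: entry i of the oop stack owns entry i of the mark stack, which is why restore (in the hunk above) is a positional walk, and why only headers that must_be_preserved() are pushed at all; everything else is reset to the prototype. A hypothetical helper making that invariant explicit:

  static void restore_saved_headers(GrowableArray<oop>* oops,
                                    GrowableArray<markWord>* marks) {
    assert(oops->length() == marks->length(), "stacks must stay in lock step");
    for (int i = 0; i < oops->length(); i++) {
      oops->at(i)->set_mark(marks->at(i));
    }
  }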
--- a/src/hotspot/share/prims/whitebox.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1748,7 +1748,7 @@
WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
oop obj_oop = JNIHandles::resolve(obj);
- return (jboolean) obj_oop->mark()->has_monitor();
+ return (jboolean) obj_oop->mark().has_monitor();
WB_END
WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
--- a/src/hotspot/share/runtime/arguments.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/arguments.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -2867,7 +2867,7 @@
if (FLAG_SET_CMDLINE(AlwaysTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(MaxTenuringThreshold, markWord::max_age + 1) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:+AlwaysTenure")) {
--- a/src/hotspot/share/runtime/basicLock.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/basicLock.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -28,9 +28,9 @@
void BasicLock::print_on(outputStream* st) const {
st->print("monitor");
- markOop moop = displaced_header();
- if (moop != NULL)
- moop->print_on(st);
+ markWord mark_word = displaced_header();
+ if (mark_word.value() != 0)
+ mark_word.print_on(st);
}
void BasicLock::move_to(oop obj, BasicLock* dest) {
@@ -62,7 +62,7 @@
// is small (given the support for inflated fast-path locking in the fast_lock, etc)
// we'll leave that optimization for another time.
- if (displaced_header()->is_neutral()) {
+ if (displaced_header().is_neutral()) {
ObjectSynchronizer::inflate_helper(obj);
// WARNING: We cannot put a check here, because the inflation
// will not update the displaced header. Once BasicLock is inflated,
--- a/src/hotspot/share/runtime/basicLock.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/basicLock.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -26,16 +26,22 @@
#define SHARE_RUNTIME_BASICLOCK_HPP
#include "oops/markOop.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
class BasicLock {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
- volatile markOop _displaced_header;
+ volatile markWord _displaced_header;
public:
- markOop displaced_header() const { return _displaced_header; }
- void set_displaced_header(markOop header) { _displaced_header = header; }
+ markWord displaced_header() const {
+ return Atomic::load(&_displaced_header);
+ }
+
+ void set_displaced_header(markWord header) {
+ Atomic::store(header, &_displaced_header);
+ }
void print_on(outputStream* st) const;
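A zero displaced header (formerly a NULL markOop) still encodes a recursive stack lock, while markWord::unused_mark() is the deliberately non-zero placeholder; the accessors go through Atomic because other threads (e.g. deoptimization) may examine the field concurrently. A minimal usage sketch of the two conventions:

  BasicLock lock;
  lock.set_displaced_header(markWord::from_pointer(NULL));  // recursive: zero
  assert(lock.displaced_header().value() == 0, "recursive enter");
  lock.set_displaced_header(markWord::unused_mark());       // placeholder
  assert(lock.displaced_header().value() != 0, "must not look recursive");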
--- a/src/hotspot/share/runtime/biasedLocking.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -45,11 +45,11 @@
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;
-static GrowableArray<Handle>* _preserved_oop_stack = NULL;
-static GrowableArray<markOop>* _preserved_mark_stack = NULL;
+static GrowableArray<Handle>* _preserved_oop_stack = NULL;
+static GrowableArray<markWord>* _preserved_mark_stack = NULL;
static void enable_biased_locking(InstanceKlass* k) {
- k->set_prototype_header(markOopDesc::biased_locking_prototype());
+ k->set_prototype_header(markWord::biased_locking_prototype());
}
static void enable_biased_locking() {
@@ -161,24 +161,24 @@
assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
assert(Thread::current()->is_VM_thread(), "must be VMThread");
- markOop mark = obj->mark();
- if (!mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (!mark.has_bias_pattern()) {
if (log_is_enabled(Info, biasedlocking)) {
ResourceMark rm;
log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT
", mark " INTPTR_FORMAT ", type %s"
", requesting thread " INTPTR_FORMAT
" because it's no longer biased)",
- p2i((void *)obj), (intptr_t) mark,
+ p2i((void *)obj), mark.value(),
obj->klass()->external_name(),
(intptr_t) requesting_thread);
}
return NOT_BIASED;
}
- uint age = mark->age();
- markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
+ uint age = mark.age();
+ markWord biased_prototype = markWord::biased_locking_prototype().set_age(age);
+ markWord unbiased_prototype = markWord::prototype().set_age(age);
// Log at "info" level if not bulk, else "trace" level
if (!is_bulk) {
@@ -187,9 +187,9 @@
INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
", allow rebias %d, requesting thread " INTPTR_FORMAT,
p2i((void *)obj),
- (intptr_t) mark,
+ mark.value(),
obj->klass()->external_name(),
- (intptr_t) obj->klass()->prototype_header(),
+ obj->klass()->prototype_header().value(),
(allow_rebias ? 1 : 0),
(intptr_t) requesting_thread);
} else {
@@ -198,14 +198,14 @@
INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
" , allow rebias %d , requesting thread " INTPTR_FORMAT,
p2i((void *)obj),
- (intptr_t) mark,
+ mark.value(),
obj->klass()->external_name(),
- (intptr_t) obj->klass()->prototype_header(),
+ obj->klass()->prototype_header().value(),
(allow_rebias ? 1 : 0),
(intptr_t) requesting_thread);
}
- JavaThread* biased_thread = mark->biased_locker();
+ JavaThread* biased_thread = mark.biased_locker();
if (biased_thread == NULL) {
// Object is anonymously biased. We can get here if, for
// example, we revoke the bias due to an identity hash code
@@ -270,7 +270,7 @@
p2i((void *) mon_info->owner()),
p2i((void *) obj));
// Assume recursive case and fix up highest lock below
- markOop mark = markOopDesc::encode((BasicLock*) NULL);
+ markWord mark = markWord::encode((BasicLock*) NULL);
highest_lock = mon_info->lock();
highest_lock->set_displaced_header(mark);
} else {
@@ -286,8 +286,8 @@
// Reset object header to point to displaced mark.
// Must release store the lock address for platforms without TSO
// ordering (e.g. ppc).
- obj->release_set_mark(markOopDesc::encode(highest_lock));
- assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+ obj->release_set_mark(markWord::encode(highest_lock));
+ assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
// Log at "info" level if not bulk, else "trace" level
if (!is_bulk) {
log_info(biasedlocking)(" Revoked bias of currently-locked object");
@@ -327,8 +327,8 @@
static HeuristicsResult update_heuristics(oop o) {
- markOop mark = o->mark();
- if (!mark->has_bias_pattern()) {
+ markWord mark = o->mark();
+ if (!mark.has_bias_pattern()) {
return HR_NOT_BIASED;
}
@@ -390,7 +390,7 @@
INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
(bulk_rebias ? "rebias" : "revoke"),
p2i((void *) o),
- (intptr_t) o->mark(),
+ o->mark().value(),
o->klass()->external_name());
jlong cur_time = os::javaTimeMillis();
@@ -413,10 +413,10 @@
// try to update the epoch -- assume another VM operation came in
// and reset the header to the unbiased state, which will
// implicitly cause all existing biases to be revoked
- if (klass->prototype_header()->has_bias_pattern()) {
- int prev_epoch = klass->prototype_header()->bias_epoch();
- klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
- int cur_epoch = klass->prototype_header()->bias_epoch();
+ if (klass->prototype_header().has_bias_pattern()) {
+ int prev_epoch = klass->prototype_header().bias_epoch();
+ klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
+ int cur_epoch = klass->prototype_header().bias_epoch();
// Now walk all threads' stacks and adjust epochs of any biased
// and locked objects of this data type we encounter
@@ -425,11 +425,11 @@
for (int i = 0; i < cached_monitor_info->length(); i++) {
MonitorInfo* mon_info = cached_monitor_info->at(i);
oop owner = mon_info->owner();
- markOop mark = owner->mark();
- if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
// We might have encountered this object already in the case of recursive locking
- assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
- owner->set_mark(mark->set_bias_epoch(cur_epoch));
+ assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+ owner->set_mark(mark.set_bias_epoch(cur_epoch));
}
}
}
@@ -437,7 +437,7 @@
// At this point we're done. All we have to do is potentially
// adjust the header of the given object to revoke its bias.
- single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
+ single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header().has_bias_pattern(), true, requesting_thread, NULL);
} else {
if (log_is_enabled(Info, biasedlocking)) {
ResourceMark rm;
@@ -448,7 +448,7 @@
// cause future instances to not be biased, but existing biased
// instances will notice that this implicitly caused their biases
// to be revoked.
- klass->set_prototype_header(markOopDesc::prototype());
+ klass->set_prototype_header(markWord::prototype());
// Now walk all threads' stacks and forcibly revoke the biases of
// any locked and biased objects of this data type we encounter.
@@ -457,8 +457,8 @@
for (int i = 0; i < cached_monitor_info->length(); i++) {
MonitorInfo* mon_info = cached_monitor_info->at(i);
oop owner = mon_info->owner();
- markOop mark = owner->mark();
- if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
single_revoke_at_safepoint(owner, false, true, requesting_thread, NULL);
}
}
@@ -475,17 +475,17 @@
BiasedLocking::Condition status_code = BIAS_REVOKED;
if (attempt_rebias_of_object &&
- o->mark()->has_bias_pattern() &&
- klass->prototype_header()->has_bias_pattern()) {
- markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
- klass->prototype_header()->bias_epoch());
+ o->mark().has_bias_pattern() &&
+ klass->prototype_header().has_bias_pattern()) {
+ markWord new_mark = markWord::encode(requesting_thread, o->mark().age(),
+ klass->prototype_header().bias_epoch());
o->set_mark(new_mark);
status_code = BIAS_REVOKED_AND_REBIASED;
log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
}
- assert(!o->mark()->has_bias_pattern() ||
- (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
+ assert(!o->mark().has_bias_pattern() ||
+ (attempt_rebias_of_object && (o->mark().biased_locker() == requesting_thread)),
"bug in bulk bias revocation");
return status_code;
@@ -566,28 +566,28 @@
assert(target == _biased_locker, "Wrong thread");
oop o = _obj();
- markOop mark = o->mark();
+ markWord mark = o->mark();
- if (!mark->has_bias_pattern()) {
+ if (!mark.has_bias_pattern()) {
return;
}
- markOop prototype = o->klass()->prototype_header();
- if (!prototype->has_bias_pattern()) {
+ markWord prototype = o->klass()->prototype_header();
+ if (!prototype.has_bias_pattern()) {
// This object has a stale bias from before the handshake
// was requested. If we fail this race, the object's bias
// has been revoked by another thread so we simply return.
- markOop biased_value = mark;
- mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
- assert(!o->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
+ markWord biased_value = mark;
+ mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
+ assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
if (biased_value == mark) {
_status_code = BiasedLocking::BIAS_REVOKED;
}
return;
}
- if (_biased_locker == mark->biased_locker()) {
- if (mark->bias_epoch() == prototype->bias_epoch()) {
+ if (_biased_locker == mark.biased_locker()) {
+ if (mark.bias_epoch() == prototype.bias_epoch()) {
// Epoch is still valid. This means biaser could be currently
// synchronized on this object. We must walk its stack looking
// for monitor records associated with this object and change
@@ -595,15 +595,15 @@
ResourceMark rm;
BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
_biased_locker->set_cached_monitor_info(NULL);
- assert(!o->mark()->has_bias_pattern(), "invariant");
+ assert(!o->mark().has_bias_pattern(), "invariant");
_biased_locker_id = JFR_THREAD_ID(_biased_locker);
_status_code = BiasedLocking::BIAS_REVOKED;
return;
} else {
- markOop biased_value = mark;
- mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
- if (mark == biased_value || !mark->has_bias_pattern()) {
- assert(!o->mark()->has_bias_pattern(), "should be revoked");
+ markWord biased_value = mark;
+ mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
+ if (mark == biased_value || !mark.has_bias_pattern()) {
+ assert(!o->mark().has_bias_pattern(), "should be revoked");
_status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
return;
}
@@ -675,7 +675,7 @@
if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
post_revocation_event(&event, obj->klass(), &revoke);
}
- assert(!obj->mark()->has_bias_pattern(), "invariant");
+ assert(!obj->mark().has_bias_pattern(), "invariant");
return revoke.status_code();
} else {
// Thread was not alive.
@@ -684,20 +684,20 @@
// on this object.
{
MutexLocker ml(Threads_lock);
- markOop mark = obj->mark();
+ markWord mark = obj->mark();
// Check if somebody else was able to revoke it before biased thread exited.
- if (!mark->has_bias_pattern()) {
+ if (!mark.has_bias_pattern()) {
return NOT_BIASED;
}
ThreadsListHandle tlh;
- markOop prototype = obj->klass()->prototype_header();
- if (!prototype->has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark->biased_locker() &&
- prototype->bias_epoch() == mark->bias_epoch())) {
- obj->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
+ markWord prototype = obj->klass()->prototype_header();
+ if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
+ prototype.bias_epoch() == mark.bias_epoch())) {
+ obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
if (event.should_commit()) {
post_revocation_event(&event, obj->klass(), &revoke);
}
- assert(!obj->mark()->has_bias_pattern(), "bias should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
return BIAS_REVOKED;
}
}
@@ -713,9 +713,9 @@
"if ThreadLocalHandshakes is enabled this should always be executed outside safepoints");
assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");
- markOop mark = obj->mark();
- assert(mark->biased_locker() == biased_locker &&
- obj->klass()->prototype_header()->bias_epoch() == mark->bias_epoch(), "invariant");
+ markWord mark = obj->mark();
+ assert(mark.biased_locker() == biased_locker &&
+ obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");
log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
@@ -723,13 +723,13 @@
Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
p2i(Thread::current()),
p2i(obj),
- p2i(mark),
+ mark.value(),
obj->klass()->external_name(),
- p2i(obj->klass()->prototype_header()),
+ obj->klass()->prototype_header().value(),
p2i(biased_locker),
Thread::current()->is_VM_thread() ? "" : "(walking own stack)");
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(obj->mark()->age());
+ markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());
GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
BasicLock* highest_lock = NULL;
@@ -740,7 +740,7 @@
p2i(mon_info->owner()),
p2i(obj));
// Assume recursive case and fix up highest lock below
- markOop mark = markOopDesc::encode((BasicLock*) NULL);
+ markWord mark = markWord::encode((BasicLock*) NULL);
highest_lock = mon_info->lock();
highest_lock->set_displaced_header(mark);
} else {
@@ -756,8 +756,8 @@
// Reset object header to point to displaced mark.
// Must release store the lock address for platforms without TSO
// ordering (e.g. ppc).
- obj->release_set_mark(markOopDesc::encode(highest_lock));
- assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+ obj->release_set_mark(markWord::encode(highest_lock));
+ assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
log_info(biasedlocking)(" Revoked bias of currently-locked object");
} else {
log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
@@ -765,7 +765,7 @@
obj->set_mark(unbiased_prototype);
}
- assert(!obj->mark()->has_bias_pattern(), "must not be biased");
+ assert(!obj->mark().has_bias_pattern(), "must not be biased");
}
@@ -777,35 +777,35 @@
// efficiently enough that we should not cause these revocations to
// update the heuristics because doing so may cause unwanted bulk
// revocations (which are expensive) to occur.
- markOop mark = obj->mark();
- if (mark->is_biased_anonymously() && !attempt_rebias) {
+ markWord mark = obj->mark();
+ if (mark.is_biased_anonymously() && !attempt_rebias) {
// We are probably trying to revoke the bias of this object due to
// an identity hash code computation. Try to revoke the bias
// without a safepoint. This is possible if we can successfully
// compare-and-exchange an unbiased header into the mark word of
// the object, meaning that no other thread has raced to acquire
// the bias of the object.
- markOop biased_value = mark;
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
- markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
+ markWord biased_value = mark;
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
+ markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED;
}
mark = res_mark; // Refresh mark with the latest value.
- } else if (mark->has_bias_pattern()) {
+ } else if (mark.has_bias_pattern()) {
Klass* k = obj->klass();
- markOop prototype_header = k->prototype_header();
- if (!prototype_header->has_bias_pattern()) {
+ markWord prototype_header = k->prototype_header();
+ if (!prototype_header.has_bias_pattern()) {
// This object has a stale bias from before the bulk revocation
// for this data type occurred. It's pointless to update the
// heuristics at this point so simply update the header with a
// CAS. If we fail this race, the object's bias has been revoked
// by another thread so we simply return and let the caller deal
// with it.
- obj->cas_set_mark(prototype_header->set_age(mark->age()), mark);
- assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
+ obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
+ assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
return BIAS_REVOKED;
- } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
+ } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
// The epoch of this biasing has expired indicating that the
// object is effectively unbiased. Depending on whether we need
// to rebias or revoke the bias of this object we can do it
@@ -813,18 +813,18 @@
// heuristics. This is normally done in the assembly code but we
// can reach this point due to various points in the runtime
// needing to revoke biases.
- markOop res_mark;
+ markWord res_mark;
if (attempt_rebias) {
assert(THREAD->is_Java_thread(), "");
- markOop biased_value = mark;
- markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
+ markWord biased_value = mark;
+ markWord rebiased_prototype = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
res_mark = obj->cas_set_mark(rebiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED_AND_REBIASED;
}
} else {
- markOop biased_value = mark;
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markWord biased_value = mark;
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
res_mark = obj->cas_set_mark(unbiased_prototype, mark);
if (res_mark == biased_value) {
return BIAS_REVOKED;
@@ -838,7 +838,7 @@
if (heuristics == HR_NOT_BIASED) {
return NOT_BIASED;
} else if (heuristics == HR_SINGLE_REVOKE) {
- JavaThread *blt = mark->biased_locker();
+ JavaThread *blt = mark.biased_locker();
assert(blt != NULL, "invariant");
if (blt == THREAD) {
// A thread is trying to revoke the bias of an object biased
@@ -851,7 +851,7 @@
ResourceMark rm;
walk_stack_and_revoke(obj(), blt);
blt->set_cached_monitor_info(NULL);
- assert(!obj->mark()->has_bias_pattern(), "invariant");
+ assert(!obj->mark().has_bias_pattern(), "invariant");
if (event.should_commit()) {
post_self_revocation_event(&event, obj->klass());
}
@@ -883,8 +883,8 @@
bool clean_my_cache = false;
for (int i = 0; i < objs->length(); i++) {
oop obj = (objs->at(i))();
- markOop mark = obj->mark();
- if (mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (mark.has_bias_pattern()) {
walk_stack_and_revoke(obj, biaser);
clean_my_cache = true;
}
@@ -948,7 +948,7 @@
// monitors in a prepass and, if they are biased, preserve their
// mark words here. This should be a relatively small set of objects
// especially compared to the number of objects in the heap.
- _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
+ _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
_preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
ResourceMark rm;
@@ -966,8 +966,8 @@
if (mon_info->owner_is_scalar_replaced()) continue;
oop owner = mon_info->owner();
if (owner != NULL) {
- markOop mark = owner->mark();
- if (mark->has_bias_pattern()) {
+ markWord mark = owner->mark();
+ if (mark.has_bias_pattern()) {
_preserved_oop_stack->push(Handle(cur, owner));
_preserved_mark_stack->push(mark);
}
@@ -990,7 +990,7 @@
int len = _preserved_oop_stack->length();
for (int i = 0; i < len; i++) {
Handle owner = _preserved_oop_stack->at(i);
- markOop mark = _preserved_mark_stack->at(i);
+ markWord mark = _preserved_mark_stack->at(i);
owner->set_mark(mark);
}
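The epoch machinery above works because bias validity is relative: a bias is honored only while the mark's epoch matches the epoch in the klass prototype header, so bumping the prototype invalidates every outstanding bias of the type at once without touching the objects. Roughly, the liveness test the runtime applies (a sketch over the accessors used above):

  static bool bias_is_live(oop obj) {
    markWord mark      = obj->mark();
    markWord prototype = obj->klass()->prototype_header();
    return mark.has_bias_pattern()
        && prototype.has_bias_pattern()
        && mark.bias_epoch() == prototype.bias_epoch();
  }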
--- a/src/hotspot/share/runtime/biasedLocking.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -102,7 +102,7 @@
// was used in a prior version of this algorithm and did not scale
// well). If too many bias revocations persist, biasing is completely
// disabled for the data type by resetting the prototype header to the
-// unbiased markOop. The fast-path locking code checks to see whether
+// unbiased markWord. The fast-path locking code checks to see whether
// the instance's bias pattern differs from the prototype header's and
// causes the bias to be revoked without reaching a safepoint or,
// again, a bulk heap sweep.
--- a/src/hotspot/share/runtime/deoptimization.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/deoptimization.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -1252,15 +1252,15 @@
assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
if (!mon_info->owner_is_scalar_replaced()) {
Handle obj(thread, mon_info->owner());
- markOop mark = obj->mark();
- if (UseBiasedLocking && mark->has_bias_pattern()) {
+ markWord mark = obj->mark();
+ if (UseBiasedLocking && mark.has_bias_pattern()) {
// New allocated objects may have the mark set to anonymously biased.
// Also the deoptimized method may have called methods with synchronization
// where the thread-local object is bias locked to the current thread.
- assert(mark->is_biased_anonymously() ||
- mark->biased_locker() == thread, "should be locked to current thread");
+ assert(mark.is_biased_anonymously() ||
+ mark.biased_locker() == thread, "should be locked to current thread");
// Reset mark word to unbiased prototype.
- markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
obj->set_mark(unbiased_prototype);
}
BasicLock* lock = mon_info->lock();
--- a/src/hotspot/share/runtime/globals.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/globals.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -2348,7 +2348,7 @@
product(uintx, StringDeduplicationAgeThreshold, 3, \
"A string must reach this age (or be promoted to an old region) " \
"to be considered for deduplication") \
- range(1, markOopDesc::max_age) \
+ range(1, markWord::max_age) \
\
diagnostic(bool, StringDeduplicationResizeALot, false, \
"Force table resize every time the table is scanned") \
--- a/src/hotspot/share/runtime/objectMonitor.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/objectMonitor.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -277,10 +277,10 @@
assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
_recursions);
- assert(((oop)object())->mark() == markOopDesc::encode(this),
+ assert(((oop)object())->mark() == markWord::encode(this),
"object mark must match encoded this: mark=" INTPTR_FORMAT
- ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
- p2i(markOopDesc::encode(this)));
+ ", encoded this=" INTPTR_FORMAT, ((oop)object())->mark().value(),
+ markWord::encode(this).value());
Self->_Stalled = 0;
return;
}
@@ -365,7 +365,7 @@
assert(_recursions == 0, "invariant");
assert(_owner == Self, "invariant");
assert(_succ != Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
@@ -593,7 +593,7 @@
assert(_owner == Self, "invariant");
assert(object() != NULL, "invariant");
// I'd like to write:
- // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ // guarantee (((oop)(object()))->mark() == markWord::encode(this), "invariant") ;
// but as we're at a safepoint that's not safe.
UnlinkAfterAcquire(Self, &node);
@@ -661,7 +661,7 @@
assert(SelfNode != NULL, "invariant");
assert(SelfNode->_thread == Self, "invariant");
assert(_waiters > 0, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
JavaThread * jt = (JavaThread *) Self;
@@ -729,7 +729,7 @@
// In addition, Self.TState is stable.
assert(_owner == Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
UnlinkAfterAcquire(Self, SelfNode);
if (_succ == Self) _succ = NULL;
assert(_succ != Self, "invariant");
@@ -1395,7 +1395,7 @@
// Verify a few postconditions
assert(_owner == Self, "invariant");
assert(_succ != Self, "invariant");
- assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+ assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
// check if the notification happened
if (!WasNotified) {
@@ -1935,7 +1935,7 @@
}
void ObjectMonitor::print_on(outputStream* st) const {
- // The minimal things to print for markOop printing, more can be added for debugging and logging.
+ // The minimal things to print for markWord printing; more can be added for debugging and logging.
st->print("{contentions=0x%08x,waiters=0x%08x"
",recursions=" INTPTR_FORMAT ",owner=" INTPTR_FORMAT "}",
contentions(), waiters(), recursions(),
--- a/src/hotspot/share/runtime/objectMonitor.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/objectMonitor.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
+#include "oops/markOop.hpp"
#include "runtime/os.hpp"
#include "runtime/park.hpp"
#include "runtime/perfData.hpp"
@@ -74,7 +75,7 @@
// ObjectMonitor Layout Overview/Highlights/Restrictions:
//
// - The _header field must be at offset 0 because the displaced header
-// from markOop is stored there. We do not want markOop.hpp to include
+// from markWord is stored there. We do not want markOop.hpp to include
// ObjectMonitor.hpp to avoid exposing ObjectMonitor everywhere. This
// means that ObjectMonitor cannot inherit from any other class nor can
// it use any virtual member functions. This restriction is critical to
@@ -141,13 +142,13 @@
friend class VMStructs;
JVMCI_ONLY(friend class JVMCIVMStructs;)
- volatile markOop _header; // displaced object header word - mark
+ volatile markWord _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
public:
ObjectMonitor* FreeNext; // Free list linkage
private:
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
- sizeof(volatile markOop) + sizeof(void * volatile) +
+ sizeof(volatile markWord) + sizeof(void * volatile) +
sizeof(ObjectMonitor *));
protected: // protected for JvmtiRawMonitor
void * volatile _owner; // pointer to owning thread OR BasicLock
@@ -213,7 +214,7 @@
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
- // ObjectMonitor references can be ORed with markOopDesc::monitor_value
+ // ObjectMonitor references can be ORed with markWord::monitor_value
// as part of the ObjectMonitor tagging mechanism. When we combine an
// ObjectMonitor reference with an offset, we need to remove the tag
// value in order to generate the proper address.
@@ -225,11 +226,11 @@
// to the ObjectMonitor reference manipulation code:
//
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
- ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
+ ((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)
- markOop header() const;
- volatile markOop* header_addr();
- void set_header(markOop hdr);
+ markWord header() const;
+ volatile markWord* header_addr();
+ void set_header(markWord hdr);
intptr_t is_busy() const {
// TODO-FIXME: assert _owner == null implies _recursions = 0
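OM_OFFSET_NO_MONITOR_VALUE_TAG exists because a mark with has_monitor() set carries the ObjectMonitor address with markWord::monitor_value OR'ed into its low bits, so any offset computed from such a tagged reference must subtract the tag again. A hypothetical round-trip showing the convention:

  static void check_monitor_tagging(ObjectMonitor* mon) {
    markWord m = markWord::encode(mon);    // address | markWord::monitor_value
    assert(m.has_monitor(), "low bits must select the monitor case");
    ObjectMonitor* back =
        (ObjectMonitor*)(m.value() - markWord::monitor_value);
    assert(back == mon, "subtracting the tag recovers the address");
  }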
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp Tue Aug 06 10:48:21 2019 +0200
@@ -25,6 +25,8 @@
#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
+#include "runtime/atomic.hpp"
+
inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
return 1;
@@ -32,17 +34,17 @@
return 0;
}
-inline markOop ObjectMonitor::header() const {
- return _header;
+inline markWord ObjectMonitor::header() const {
+ return Atomic::load(&_header);
}
-inline volatile markOop* ObjectMonitor::header_addr() {
+inline volatile markWord* ObjectMonitor::header_addr() {
assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
return &_header;
}
-inline void ObjectMonitor::set_header(markOop hdr) {
- _header = hdr;
+inline void ObjectMonitor::set_header(markWord hdr) {
+ Atomic::store(hdr, &_header);
}
inline jint ObjectMonitor::waiters() const {
@@ -54,14 +56,14 @@
}
inline void ObjectMonitor::clear() {
- assert(_header != NULL, "must be non-NULL");
+ assert(Atomic::load(&_header).value() != 0, "must be non-zero");
assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
assert(_object != NULL, "must be non-NULL");
assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
- _header = NULL;
+ Atomic::store(markWord::zero(), &_header);
_object = NULL;
}
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -3112,10 +3112,10 @@
if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
BasicLock *lock = kptr2->lock();
// Inflate so the displaced header becomes position-independent
- if (lock->displaced_header()->is_unlocked())
+ if (lock->displaced_header().is_unlocked())
ObjectSynchronizer::inflate_helper(kptr2->obj());
// Now the displaced header is free to move
- buf[i++] = (intptr_t)lock->displaced_header();
+ buf[i++] = (intptr_t)lock->displaced_header().value();
buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
}
}
--- a/src/hotspot/share/runtime/synchronizer.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/synchronizer.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -161,16 +161,16 @@
assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
- const markOop mark = obj->mark();
+ const markWord mark = obj->mark();
- if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
+ if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
}
- if (mark->has_monitor()) {
- ObjectMonitor * const mon = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor * const mon = mark.monitor();
assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
if (mon->owner() != self) return false; // slow-path for IMS exception
@@ -211,10 +211,10 @@
assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // Need to throw NPE
- const markOop mark = obj->mark();
+ const markWord mark = obj->mark();
- if (mark->has_monitor()) {
- ObjectMonitor * const m = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor * const m = mark.monitor();
assert(oopDesc::equals((oop) m->object(), obj), "invariant");
Thread * const owner = (Thread *) m->_owner;
@@ -238,7 +238,7 @@
// stack-locking in the object's header, the third check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
- lock->set_displaced_header(markOopDesc::unused_mark());
+ lock->set_displaced_header(markWord::unused_mark());
if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
assert(m->_recursions == 0, "invariant");
@@ -275,31 +275,31 @@
assert(!attempt_rebias, "can not rebias toward VM thread");
BiasedLocking::revoke_at_safepoint(obj);
}
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
slow_enter(obj, lock, THREAD);
}
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
- markOop mark = object->mark();
+ markWord mark = object->mark();
// We cannot check for Biased Locking if we are racing an inflation.
- assert(mark == markOopDesc::INFLATING() ||
- !mark->has_bias_pattern(), "should not see bias pattern here");
+ assert(mark == markWord::INFLATING() ||
+ !mark.has_bias_pattern(), "should not see bias pattern here");
- markOop dhw = lock->displaced_header();
- if (dhw == NULL) {
+ markWord dhw = lock->displaced_header();
+ if (dhw.value() == 0) {
// If the displaced header is NULL, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
- if (mark != markOopDesc::INFLATING()) {
+ if (mark != markWord::INFLATING()) {
// Only do diagnostics if we are not racing an inflation. Simply
// exiting a recursive enter of a Java Monitor that is being
// inflated is safe; see the has_monitor() comment below.
- assert(!mark->is_neutral(), "invariant");
- assert(!mark->has_locker() ||
- THREAD->is_lock_owned((address)mark->locker()), "invariant");
- if (mark->has_monitor()) {
+ assert(!mark.is_neutral(), "invariant");
+ assert(!mark.has_locker() ||
+ THREAD->is_lock_owned((address)mark.locker()), "invariant");
+ if (mark.has_monitor()) {
// The BasicLock's displaced_header is marked as a recursive
// enter and we have an inflated Java Monitor (ObjectMonitor).
// This is a special case where the Java Monitor was inflated
@@ -308,7 +308,7 @@
// Monitor owner's stack and update the BasicLocks because a
// Java Monitor can be asynchronously inflated by a thread that
// does not own the Java Monitor.
- ObjectMonitor * m = mark->monitor();
+ ObjectMonitor * m = mark.monitor();
assert(((oop)(m->object()))->mark() == mark, "invariant");
assert(m->is_entered(THREAD), "invariant");
}
@@ -317,10 +317,10 @@
return;
}
- if (mark == (markOop) lock) {
+ if (mark == markWord::from_pointer(lock)) {
// If the object is stack-locked by the current thread, try to
// swing the displaced header from the BasicLock back to the mark.
- assert(dhw->is_neutral(), "invariant");
+ assert(dhw.is_neutral(), "invariant");
if (object->cas_set_mark(dhw, mark) == mark) {
return;
}
@@ -336,22 +336,22 @@
// We don't need to use fast path here, because it must have been
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
- markOop mark = obj->mark();
- assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+ markWord mark = obj->mark();
+ assert(!mark.has_bias_pattern(), "should not see bias pattern here");
- if (mark->is_neutral()) {
+ if (mark.is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
- if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
+ if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
return;
}
// Fall through to inflate() ...
- } else if (mark->has_locker() &&
- THREAD->is_lock_owned((address)mark->locker())) {
- assert(lock != mark->locker(), "must not re-lock the same lock");
- assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
- lock->set_displaced_header(NULL);
+ } else if (mark.has_locker() &&
+ THREAD->is_lock_owned((address)mark.locker())) {
+ assert(lock != mark.locker(), "must not re-lock the same lock");
+ assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
+ lock->set_displaced_header(markWord::from_pointer(NULL));
return;
}
@@ -359,7 +359,7 @@
// so it does not matter what the value is, except that it
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
- lock->set_displaced_header(markOopDesc::unused_mark());
+ lock->set_displaced_header(markWord::unused_mark());
inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}
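slow_enter above is the canonical stack-lock order: the neutral header is stored into the BasicLock first, and only then is the lock's address CASed into the object header, so a contending thread can always find a valid displaced header. Condensed into a sketch (assumes the markWord API shown in this hunk):

  static bool try_stack_lock(Handle obj, BasicLock* lock) {
    markWord mark = obj->mark();
    if (!mark.is_neutral()) {
      return false;                        // locked or biased: caller inflates
    }
    lock->set_displaced_header(mark);      // ST must be visible <= the CAS
    return obj()->cas_set_mark(markWord::from_pointer(lock), mark) == mark;
  }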
@@ -386,7 +386,7 @@
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
@@ -398,7 +398,7 @@
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
@@ -412,7 +412,7 @@
// the current locking is from JNI instead of Java code
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
THREAD->set_current_pending_monitor_is_from_java(false);
inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
@@ -426,7 +426,7 @@
BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
obj = h_obj();
}
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
// If this thread has locked the object, exit the monitor. We
@@ -464,7 +464,7 @@
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
@@ -484,7 +484,7 @@
void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
@@ -495,11 +495,11 @@
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
- markOop mark = obj->mark();
- if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ markWord mark = obj->mark();
+ if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
return;
}
inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
@@ -509,11 +509,11 @@
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
- markOop mark = obj->mark();
- if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ markWord mark = obj->mark();
+ if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
return;
}
inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
@@ -556,16 +556,16 @@
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
-static markOop ReadStableMark(oop obj) {
- markOop mark = obj->mark();
- if (!mark->is_being_inflated()) {
+static markWord ReadStableMark(oop obj) {
+ markWord mark = obj->mark();
+ if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
int its = 0;
for (;;) {
- markOop mark = obj->mark();
- if (!mark->is_being_inflated()) {
+ markWord mark = obj->mark();
+ if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
@@ -604,7 +604,7 @@
assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
- while (obj->mark() == markOopDesc::INFLATING()) {
+ while (obj->mark() == markWord::INFLATING()) {
// Beware: NakedYield() is advisory and has almost no effect on some platforms
// so we periodically call Self->_ParkEvent->park(1).
// We use a mixed spin/yield/block mechanism.
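The mixed spin/yield/block backoff described here can be sketched with standard primitives. This is a simplification under stated assumptions: the real loop also stripes sleeping threads across the gInflationLocks below and parks on a per-thread event rather than sleeping.

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Simplified wait for a transient zero (INFLATING) mark word.
    uintptr_t read_stable_mark(const std::atomic<uintptr_t>& mark) {
      int its = 0;
      for (;;) {
        uintptr_t m = mark.load(std::memory_order_acquire);
        if (m != 0) {
          return m;                    // not being inflated: stable value
        }
        ++its;
        if (its < 100) {
          // brief busy-spin first
        } else if (its % 16 != 0) {
          std::this_thread::yield();   // then mostly yield
        } else {
          std::this_thread::sleep_for(std::chrono::milliseconds(1)); // block
        }
      }
    }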
@@ -673,9 +673,9 @@
value = v;
}
- value &= markOopDesc::hash_mask;
+ value &= markWord::hash_mask;
if (value == 0) value = 0xBAD;
- assert(value != markOopDesc::no_hash, "invariant");
+ assert(value != markWord::no_hash, "invariant");
return value;
}
@@ -688,7 +688,7 @@
// been checked to make sure they can handle a safepoint. The
// added check of the bias pattern is to avoid useless calls to
// thread-local storage.
- if (obj->mark()->has_bias_pattern()) {
+ if (obj->mark().has_bias_pattern()) {
// Handle for oop obj in case of STW safepoint
Handle hobj(Self, obj);
// Relaxing assertion for bug 6320749.
@@ -697,7 +697,7 @@
"biases should not be seen by VM thread here");
BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
obj = hobj();
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
}
@@ -711,20 +711,20 @@
((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
ObjectMonitor* monitor = NULL;
- markOop temp, test;
+ markWord temp, test;
intptr_t hash;
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// object should remain ineligible for biased locking
- assert(!mark->has_bias_pattern(), "invariant");
+ assert(!mark.has_bias_pattern(), "invariant");
- if (mark->is_neutral()) {
- hash = mark->hash(); // this is a normal header
+ if (mark.is_neutral()) {
+ hash = mark.hash(); // this is a normal header
if (hash != 0) { // if it has hash, just return it
return hash;
}
hash = get_next_hash(Self, obj); // allocate a new hash code
- temp = mark->copy_set_hash(hash); // merge the hash code into header
+ temp = mark.copy_set_hash(hash); // merge the hash code into header
// use (machine word version) atomic operation to install the hash
test = obj->cas_set_mark(temp, mark);
if (test == mark) {
@@ -733,20 +733,20 @@
// If the atomic operation failed, we must inflate the header
// into a heavyweight monitor. We could add more code here
// for the fast path, but it is not worth the complexity.
- } else if (mark->has_monitor()) {
- monitor = mark->monitor();
+ } else if (mark.has_monitor()) {
+ monitor = mark.monitor();
temp = monitor->header();
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- hash = temp->hash();
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ hash = temp.hash();
if (hash != 0) {
return hash;
}
// Skip to the following code to reduce code size
- } else if (Self->is_lock_owned((address)mark->locker())) {
- temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- hash = temp->hash(); // by current thread, check if the displaced
- if (hash != 0) { // header contains hash code
+ } else if (Self->is_lock_owned((address)mark.locker())) {
+ temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ hash = temp.hash(); // by current thread, check if the displaced
+ if (hash != 0) { // header contains hash code
return hash;
}
// WARNING:
@@ -763,19 +763,20 @@
monitor = inflate(Self, obj, inflate_cause_hash_code);
// Load displaced header and check it has hash code
mark = monitor->header();
- assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
- hash = mark->hash();
+ assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+ hash = mark.hash();
if (hash == 0) {
hash = get_next_hash(Self, obj);
- temp = mark->copy_set_hash(hash); // merge hash code into header
- assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
- test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
+ temp = mark.copy_set_hash(hash); // merge hash code into header
+ assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
+ uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
+ test = markWord(v);
if (test != mark) {
// The only update to the ObjectMonitor's header/dmw field
// is to merge in the hash code. If someone adds a new usage
// of the header/dmw field, please update this code.
- hash = test->hash();
- assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
+ hash = test.hash();
+ assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
assert(hash != 0, "Trivial unexpected object/monitor header usage.");
}
}
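The cmpxchg rewrite in this hunk is where the value carrier shows through most clearly: the CAS now operates on the raw uintptr_t payload and the result is re-wrapped into a markWord. A self-contained sketch of that pattern with std::atomic (Atomic::cmpxchg returns the value found at the destination; compare_exchange_strong reports it through its by-reference argument):

    #include <atomic>
    #include <cstdint>

    // CAS on the raw bits of a header word, mirroring the hunk above.
    // Returns the value found at the destination, like Atomic::cmpxchg.
    uintptr_t cmpxchg_header(std::atomic<uintptr_t>* header_addr,
                             uintptr_t exchange, uintptr_t compare) {
      uintptr_t expected = compare;
      header_addr->compare_exchange_strong(expected, exchange);
      return expected; // on failure, the header actually observed
    }

    // Usage shape, with markWord as the value carrier:
    //   uintptr_t v = cmpxchg_header(addr, temp.value(), mark.value());
    //   markWord test = markWord(v);
    //   if (test != mark) { hash = test.hash(); /* lost the race */ }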
@@ -794,25 +795,25 @@
Handle h_obj) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(h_obj, false, thread);
- assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
assert(thread == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// Uncontended case, header points to stack
- if (mark->has_locker()) {
- return thread->is_lock_owned((address)mark->locker());
+ if (mark.has_locker()) {
+ return thread->is_lock_owned((address)mark.locker());
}
// Contended case, header points to ObjectMonitor (tagged pointer)
- if (mark->has_monitor()) {
- ObjectMonitor* monitor = mark->monitor();
+ if (mark.has_monitor()) {
+ ObjectMonitor* monitor = mark.monitor();
return monitor->is_entered(thread) != 0;
}
// Unlocked case, header in place
- assert(mark->is_neutral(), "sanity check");
+ assert(mark.is_neutral(), "sanity check");
return false;
}
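The three-way walk above keys off the low lock bits of the word. A simplified decoder follows, under bit assignments that are assumptions consistent with the markWord constants declared in vmStructs.cpp below (the real is_neutral also masks the biased-lock bit, and has_monitor tolerates the GC-marked state):

    #include <cstdint>

    // Simplified lock-state decoding from the two low bits of a mark word.
    enum : uintptr_t {
      locked_value       = 0, // stack-locked: word is a BasicLock* on a stack
      unlocked_value     = 1, // neutral: hash and age live in the word itself
      monitor_value      = 2, // inflated: word is a tagged ObjectMonitor*
      lock_mask_in_place = 3
    };

    inline bool has_locker(uintptr_t m)  { return (m & lock_mask_in_place) == locked_value; }
    inline bool has_monitor(uintptr_t m) { return (m & lock_mask_in_place) == monitor_value; }
    inline bool is_neutral(uintptr_t m)  { return (m & lock_mask_in_place) == unlocked_value; }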
@@ -830,35 +831,35 @@
// Possible mark states: neutral, biased, stack-locked, inflated
- if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+ if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
// CASE: biased
BiasedLocking::revoke_and_rebias(h_obj, false, self);
- assert(!h_obj->mark()->has_bias_pattern(),
+ assert(!h_obj->mark().has_bias_pattern(),
"biases should be revoked by now");
}
assert(self == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
- if (mark->has_locker()) {
- return self->is_lock_owned((address)mark->locker()) ?
+ if (mark.has_locker()) {
+ return self->is_lock_owned((address)mark.locker()) ?
owner_self : owner_other;
}
// CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
// The Object:ObjectMonitor relationship is stable as long as we're
// not at a safepoint.
- if (mark->has_monitor()) {
- void * owner = mark->monitor()->_owner;
+ if (mark.has_monitor()) {
+ void * owner = mark.monitor()->_owner;
if (owner == NULL) return owner_none;
return (owner == self ||
self->is_lock_owned((address)owner)) ? owner_self : owner_other;
}
// CASE: neutral
- assert(mark->is_neutral(), "sanity check");
+ assert(mark.is_neutral(), "sanity check");
return owner_none; // it's unlocked
}
@@ -870,22 +871,22 @@
} else {
BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
}
- assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
oop obj = h_obj();
address owner = NULL;
- markOop mark = ReadStableMark(obj);
+ markWord mark = ReadStableMark(obj);
// Uncontended case, header points to stack
- if (mark->has_locker()) {
- owner = (address) mark->locker();
+ if (mark.has_locker()) {
+ owner = (address) mark.locker();
}
// Contended case, header points to ObjectMonitor (tagged pointer)
- else if (mark->has_monitor()) {
- ObjectMonitor* monitor = mark->monitor();
+ else if (mark.has_monitor()) {
+ ObjectMonitor* monitor = mark.monitor();
assert(monitor != NULL, "monitor should be non-null");
owner = (address) monitor->owner();
}
@@ -898,7 +899,7 @@
// Unlocked case, header in place
// Cannot have assertion since this object may have been
// locked by another thread when reaching here.
- // assert(mark->is_neutral(), "sanity check");
+ // assert(mark.is_neutral(), "sanity check");
return NULL;
}
@@ -1165,7 +1166,7 @@
void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
bool fromPerThreadAlloc) {
- guarantee(m->header() == NULL, "invariant");
+ guarantee(m->header().value() == 0, "invariant");
guarantee(m->object() == NULL, "invariant");
stringStream ss;
guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
@@ -1301,10 +1302,10 @@
// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
- markOop mark = obj->mark();
- if (mark->has_monitor()) {
- assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
- assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
+ markWord mark = obj->mark();
+ if (mark.has_monitor()) {
+ assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
+ assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
return;
}
inflate(Thread::current(), obj, inflate_cause_vm_internal);
@@ -1321,8 +1322,8 @@
EventJavaMonitorInflate event;
for (;;) {
- const markOop mark = object->mark();
- assert(!mark->has_bias_pattern(), "invariant");
+ const markWord mark = object->mark();
+ assert(!mark.has_bias_pattern(), "invariant");
// The mark can be in one of the following states:
// * Inflated - just return
@@ -1332,10 +1333,10 @@
// * BIASED - Illegal. We should never see this
// CASE: inflated
- if (mark->has_monitor()) {
- ObjectMonitor * inf = mark->monitor();
- markOop dmw = inf->header();
- assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ if (mark.has_monitor()) {
+ ObjectMonitor * inf = mark.monitor();
+ markWord dmw = inf->header();
+ assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
assert(oopDesc::equals((oop) inf->object(), object), "invariant");
assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
return inf;
@@ -1347,7 +1348,7 @@
// The INFLATING value is transient.
// Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
// We could always eliminate polling by parking the thread on some auxiliary list.
- if (mark == markOopDesc::INFLATING()) {
+ if (mark == markWord::INFLATING()) {
ReadStableMark(object);
continue;
}
@@ -1373,7 +1374,7 @@
LogStreamHandle(Trace, monitorinflation) lsh;
- if (mark->has_locker()) {
+ if (mark.has_locker()) {
ObjectMonitor * m = omAlloc(Self);
// Optimistically prepare the objectmonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
@@ -1382,7 +1383,7 @@
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
- markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
+ markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
if (cmp != mark) {
omRelease(Self, m, true);
continue; // Interference -- just retry
@@ -1410,7 +1411,7 @@
// drop the lock (restoring the header from the basiclock to the object)
// while inflation is in-progress. This protocol avoids races that might
// otherwise permit hashCode values to change or "flicker" for an object.
- // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+  // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
// 0 serves as a "BUSY" inflate-in-progress indicator.
@@ -1418,27 +1419,27 @@
// The owner can't die or unwind past the lock while our INFLATING
// object is in the mark. Furthermore the owner can't complete
// an unlock on the object, either.
- markOop dmw = mark->displaced_mark_helper();
+ markWord dmw = mark.displaced_mark_helper();
// Catch if the object's header is not neutral (not locked and
// not marked is what we care about here).
- assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
// Setup monitor fields to proper values -- prepare the monitor
m->set_header(dmw);
- // Optimization: if the mark->locker stack address is associated
+ // Optimization: if the mark.locker stack address is associated
// with this thread we could simply set m->_owner = Self.
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
- m->set_owner(mark->locker());
+ m->set_owner(mark.locker());
m->set_object(object);
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
// be stable at the time of publishing the monitor address.
- guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
- object->release_set_mark(markOopDesc::encode(m));
+ guarantee(object->mark() == markWord::INFLATING(), "invariant");
+ object->release_set_mark(markWord::encode(m));
// Hopefully the performance counters are allocated on distinct cache lines
// to avoid false sharing on MP systems ...
@@ -1447,7 +1448,7 @@
ResourceMark rm(Self);
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
- p2i(object->mark()), object->klass()->external_name());
+ object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
@@ -1467,7 +1468,7 @@
// Catch if the object's header is not neutral (not locked and
// not marked is what we care about here).
- assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
+ assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
ObjectMonitor * m = omAlloc(Self);
// prepare m for installation - set monitor to initial state
m->Recycle();
@@ -1476,8 +1477,8 @@
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
- if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
- m->set_header(NULL);
+ if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
+ m->set_header(markWord::zero());
m->set_object(NULL);
m->Recycle();
omRelease(Self, m, true);
@@ -1495,7 +1496,7 @@
ResourceMark rm(Self);
lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
- p2i(object->mark()), object->klass()->external_name());
+ object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
@@ -1533,15 +1534,15 @@
ObjectMonitor** freeTailp) {
bool deflated;
// Normal case ... The monitor is associated with obj.
- const markOop mark = obj->mark();
- guarantee(mark == markOopDesc::encode(mid), "should match: mark="
- INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
- p2i(markOopDesc::encode(mid)));
- // Make sure that mark->monitor() and markOopDesc::encode() agree:
- guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
- ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
- const markOop dmw = mid->header();
- guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
+ const markWord mark = obj->mark();
+ guarantee(mark == markWord::encode(mid), "should match: mark="
+ INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
+ markWord::encode(mid).value());
+ // Make sure that mark.monitor() and markWord::encode() agree:
+ guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
+ ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
+ const markWord dmw = mid->header();
+ guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
if (mid->is_busy()) {
deflated = false;
@@ -1554,7 +1555,7 @@
log_trace(monitorinflation)("deflate_monitor: "
"object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(obj),
- p2i(mark), obj->klass()->external_name());
+ mark.value(), obj->klass()->external_name());
}
// Restore the header back to obj
@@ -1935,16 +1936,16 @@
}
*error_cnt_p = *error_cnt_p + 1;
}
- if (n->header() != NULL) {
+ if (n->header().value() != 0) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": free per-thread monitor must have NULL _header "
"field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
- p2i(n->header()));
+ n->header().value());
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
"must have NULL _header field: _header=" INTPTR_FORMAT,
- p2i(n), p2i(n->header()));
+ p2i(n), n->header().value());
}
*error_cnt_p = *error_cnt_p + 1;
}
@@ -2003,7 +2004,7 @@
// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
outputStream * out, int *error_cnt_p) {
- if (n->header() == NULL) {
+ if (n->header().value() == 0) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor must have non-NULL _header "
@@ -2026,34 +2027,34 @@
*error_cnt_p = *error_cnt_p + 1;
}
const oop obj = (oop)n->object();
- const markOop mark = obj->mark();
- if (!mark->has_monitor()) {
+ const markWord mark = obj->mark();
+ if (!mark.has_monitor()) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor's object does not think "
"it has a monitor: obj=" INTPTR_FORMAT ", mark="
- INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), p2i(mark));
+ INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
"monitor's object does not think it has a monitor: obj="
INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
- p2i(obj), p2i(mark));
+ p2i(obj), mark.value());
}
*error_cnt_p = *error_cnt_p + 1;
}
- ObjectMonitor * const obj_mon = mark->monitor();
+ ObjectMonitor * const obj_mon = mark.monitor();
if (n != obj_mon) {
if (jt != NULL) {
out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
": in-use per-thread monitor's object does not refer "
"to the same monitor: obj=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
- p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
+ p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
} else {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
"monitor's object does not refer to the same monitor: obj="
INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
- INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
+ INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
}
*error_cnt_p = *error_cnt_p + 1;
}
@@ -2119,10 +2120,10 @@
out->print_cr("================== === ================== ==================");
for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
const oop obj = (oop) n->object();
- const markOop mark = n->header();
+ const markWord mark = n->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
- n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL,
+ n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
p2i(obj), obj->klass()->external_name());
if (n->is_busy() != 0) {
out->print(" (%s)", n->is_busy_to_string(&ss));
@@ -2144,11 +2145,11 @@
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
const oop obj = (oop) n->object();
- const markOop mark = n->header();
+ const markWord mark = n->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
" %s", p2i(jt), p2i(n), n->is_busy() != 0,
- mark->hash() != 0, n->owner() != NULL, p2i(obj),
+ mark.hash() != 0, n->owner() != NULL, p2i(obj),
obj->klass()->external_name());
if (n->is_busy() != 0) {
out->print(" (%s)", n->is_busy_to_string(&ss));
--- a/src/hotspot/share/runtime/thread.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/thread.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -173,7 +173,7 @@
// Support for forcing alignment of thread objects for biased locking
void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
if (UseBiasedLocking) {
- const int alignment = markOopDesc::biased_lock_alignment;
+ const int alignment = markWord::biased_lock_alignment;
size_t aligned_size = size + (alignment - sizeof(intptr_t));
void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
: AllocateHeap(aligned_size, flags, CURRENT_PC,
@@ -301,9 +301,9 @@
#endif // CHECK_UNHANDLED_OOPS
#ifdef ASSERT
if (UseBiasedLocking) {
- assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+ assert((((uintptr_t) this) & (markWord::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
assert(this == _real_malloc_address ||
- this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+ this == align_up(_real_malloc_address, (int)markWord::biased_lock_alignment),
"bug in forced alignment of thread objects");
}
#endif // ASSERT
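The over-allocate-and-round-up trick above is standard. A self-contained sketch, assuming the alignment is a power of two and that malloc returns at least sizeof(intptr_t)-aligned memory, which is what the aligned_size arithmetic relies on:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Forced alignment by over-allocation, mirroring Thread::allocate.
    // The caller must keep the raw pointer (HotSpot stores it in
    // _real_malloc_address) in order to free the block later.
    void* allocate_aligned(std::size_t size, std::size_t alignment) {
      void* raw = std::malloc(size + (alignment - sizeof(intptr_t)));
      if (raw == NULL) return NULL;
      uintptr_t bits = (uintptr_t)raw;
      uintptr_t aligned = (bits + alignment - 1) & ~(uintptr_t)(alignment - 1);
      return (void*)aligned;
    }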
--- a/src/hotspot/share/runtime/vframe.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/vframe.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -224,7 +224,7 @@
if (monitor->owner() != NULL) {
// the monitor is associated with an object, i.e., it is locked
- markOop mark = NULL;
+ markWord mark(markWord::zero());
const char *lock_state = "locked"; // assume we have the monitor locked
if (!found_first_monitor && frame_count == 0) {
// If this is the first frame and we haven't found an owned
@@ -233,17 +233,17 @@
// an inflated monitor that is first on the monitor list in
// the first frame can block us on a monitor enter.
mark = monitor->owner()->mark();
- if (mark->has_monitor() &&
+ if (mark.has_monitor() &&
( // we have marked ourself as pending on this monitor
- mark->monitor() == thread()->current_pending_monitor() ||
+ mark.monitor() == thread()->current_pending_monitor() ||
// we are not the owner of this monitor
- !mark->monitor()->is_entered(thread())
+ !mark.monitor()->is_entered(thread())
)) {
lock_state = "waiting to lock";
} else {
// We own the monitor which is not as interesting so
// disable the extra printing below.
- mark = NULL;
+ mark = markWord::zero();
}
}
print_locked_object_class_name(st, Handle(THREAD, monitor->owner()), lock_state);
--- a/src/hotspot/share/runtime/vmStructs.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/src/hotspot/share/runtime/vmStructs.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -201,7 +201,7 @@
/* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/******************************************************************/ \
\
- volatile_nonstatic_field(oopDesc, _mark, markOop) \
+ volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
volatile_nonstatic_field(oopDesc, _metadata._compressed_klass, narrowKlass) \
static_field(BarrierSet, _barrier_set, BarrierSet*) \
@@ -264,7 +264,7 @@
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _access_flags, AccessFlags) \
- nonstatic_field(Klass, _prototype_header, markOop) \
+ nonstatic_field(Klass, _prototype_header, markWord) \
volatile_nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _next_link, Klass*) \
nonstatic_field(Klass, _vtable_len, int) \
@@ -906,14 +906,14 @@
/* Monitors */ \
/************/ \
\
- volatile_nonstatic_field(ObjectMonitor, _header, markOop) \
+ volatile_nonstatic_field(ObjectMonitor, _header, markWord) \
unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \
unchecked_nonstatic_field(ObjectMonitor, _owner, sizeof(void *)) /* NOTE: no type */ \
volatile_nonstatic_field(ObjectMonitor, _contentions, jint) \
volatile_nonstatic_field(ObjectMonitor, _waiters, jint) \
volatile_nonstatic_field(ObjectMonitor, _recursions, intptr_t) \
nonstatic_field(ObjectMonitor, FreeNext, ObjectMonitor*) \
- volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
+ volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \
nonstatic_field(BasicObjectLock, _lock, BasicLock) \
nonstatic_field(BasicObjectLock, _obj, oop) \
static_ptr_volatile_field(ObjectSynchronizer, gBlockList, PaddedObjectMonitor*) \
@@ -1267,7 +1267,6 @@
declare_type(arrayOopDesc, oopDesc) \
declare_type(objArrayOopDesc, arrayOopDesc) \
declare_type(instanceOopDesc, oopDesc) \
- declare_type(markOopDesc, oopDesc) \
\
/**************************************************/ \
/* MetadataOopDesc hierarchy (NOTE: some missing) */ \
@@ -1305,7 +1304,6 @@
/* Oops */ \
/********/ \
\
- declare_oop_type(markOop) \
declare_oop_type(objArrayOop) \
declare_oop_type(oop) \
declare_oop_type(narrowOop) \
@@ -1955,9 +1953,10 @@
declare_toplevel_type(BitMap) \
declare_type(BitMapView, BitMap) \
\
- declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
+ declare_integer_type(markWord) \
+ declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
declare_toplevel_type(address) /* FIXME: should this be an integer type? */\
- declare_integer_type(BasicType) /* FIXME: wrong type (not integer) */\
+ declare_integer_type(BasicType) /* FIXME: wrong type (not integer) */ \
declare_toplevel_type(BreakpointInfo) \
declare_toplevel_type(BreakpointInfo*) \
declare_toplevel_type(CodeBlob*) \
@@ -2630,52 +2629,52 @@
VM_LONG_CONSTANTS_GC(declare_constant) \
\
/*********************/ \
- /* MarkOop constants */ \
+ /* markWord constants */ \
/*********************/ \
\
/* Note: some of these are declared as long constants just for */ \
/* consistency. The mask constants are the only ones requiring */ \
/* 64 bits (on 64-bit platforms). */ \
\
- declare_constant(markOopDesc::age_bits) \
- declare_constant(markOopDesc::lock_bits) \
- declare_constant(markOopDesc::biased_lock_bits) \
- declare_constant(markOopDesc::max_hash_bits) \
- declare_constant(markOopDesc::hash_bits) \
+ declare_constant(markWord::age_bits) \
+ declare_constant(markWord::lock_bits) \
+ declare_constant(markWord::biased_lock_bits) \
+ declare_constant(markWord::max_hash_bits) \
+ declare_constant(markWord::hash_bits) \
\
- declare_constant(markOopDesc::lock_shift) \
- declare_constant(markOopDesc::biased_lock_shift) \
- declare_constant(markOopDesc::age_shift) \
- declare_constant(markOopDesc::hash_shift) \
+ declare_constant(markWord::lock_shift) \
+ declare_constant(markWord::biased_lock_shift) \
+ declare_constant(markWord::age_shift) \
+ declare_constant(markWord::hash_shift) \
\
- declare_constant(markOopDesc::lock_mask) \
- declare_constant(markOopDesc::lock_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_mask) \
- declare_constant(markOopDesc::biased_lock_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_bit_in_place) \
- declare_constant(markOopDesc::age_mask) \
- declare_constant(markOopDesc::age_mask_in_place) \
- declare_constant(markOopDesc::epoch_mask) \
- declare_constant(markOopDesc::epoch_mask_in_place) \
- declare_constant(markOopDesc::hash_mask) \
- declare_constant(markOopDesc::hash_mask_in_place) \
- declare_constant(markOopDesc::biased_lock_alignment) \
+ declare_constant(markWord::lock_mask) \
+ declare_constant(markWord::lock_mask_in_place) \
+ declare_constant(markWord::biased_lock_mask) \
+ declare_constant(markWord::biased_lock_mask_in_place) \
+ declare_constant(markWord::biased_lock_bit_in_place) \
+ declare_constant(markWord::age_mask) \
+ declare_constant(markWord::age_mask_in_place) \
+ declare_constant(markWord::epoch_mask) \
+ declare_constant(markWord::epoch_mask_in_place) \
+ declare_constant(markWord::hash_mask) \
+ declare_constant(markWord::hash_mask_in_place) \
+ declare_constant(markWord::biased_lock_alignment) \
\
- declare_constant(markOopDesc::locked_value) \
- declare_constant(markOopDesc::unlocked_value) \
- declare_constant(markOopDesc::monitor_value) \
- declare_constant(markOopDesc::marked_value) \
- declare_constant(markOopDesc::biased_lock_pattern) \
+ declare_constant(markWord::locked_value) \
+ declare_constant(markWord::unlocked_value) \
+ declare_constant(markWord::monitor_value) \
+ declare_constant(markWord::marked_value) \
+ declare_constant(markWord::biased_lock_pattern) \
\
- declare_constant(markOopDesc::no_hash) \
- declare_constant(markOopDesc::no_hash_in_place) \
- declare_constant(markOopDesc::no_lock_in_place) \
- declare_constant(markOopDesc::max_age) \
+ declare_constant(markWord::no_hash) \
+ declare_constant(markWord::no_hash_in_place) \
+ declare_constant(markWord::no_lock_in_place) \
+ declare_constant(markWord::max_age) \
\
- /* Constants in markOop used by CMS. */ \
- declare_constant(markOopDesc::cms_shift) \
- declare_constant(markOopDesc::cms_mask) \
- declare_constant(markOopDesc::size_shift) \
+ /* Constants in markWord used by CMS. */ \
+ declare_constant(markWord::cms_shift) \
+ declare_constant(markWord::cms_mask) \
+ declare_constant(markWord::size_shift) \
\
/* InvocationCounter constants */ \
declare_constant(InvocationCounter::count_increment) \
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Fri Aug 16 09:18:41 2019 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Tue Aug 06 10:48:21 2019 +0200
@@ -655,14 +655,7 @@
}
if (isOopType) {
- // HACK: turn markOop into a C integer type. This allows
- // proper handling of it in the Serviceability Agent. (FIXME
- // -- consider doing something different here)
- if (typeName.equals("markOop")) {
- type = new BasicCIntegerType(this, typeName, true);
- } else {
- type.setIsOopType(true);
- }
+ type.setIsOopType(true);
}
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Fri Aug 16 09:18:41 2019 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Tue Aug 06 10:48:21 2019 +0200
@@ -32,12 +32,6 @@
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
-/** Mark is the analogue of the VM's markOop. In this system it does
- not subclass Oop but VMObject. For a mark on the stack, the mark's
- address will be an Address; for a mark in the header of an object,
- it will be an OopHandle. It is assumed in a couple of places in
- this code that the mark is the first word in an object. */
-
public class Mark extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
@@ -51,39 +45,39 @@
Type type = db.lookupType("oopDesc");
markField = type.getCIntegerField("_mark");
- ageBits = db.lookupLongConstant("markOopDesc::age_bits").longValue();
- lockBits = db.lookupLongConstant("markOopDesc::lock_bits").longValue();
- biasedLockBits = db.lookupLongConstant("markOopDesc::biased_lock_bits").longValue();
- maxHashBits = db.lookupLongConstant("markOopDesc::max_hash_bits").longValue();
- hashBits = db.lookupLongConstant("markOopDesc::hash_bits").longValue();
- lockShift = db.lookupLongConstant("markOopDesc::lock_shift").longValue();
- biasedLockShift = db.lookupLongConstant("markOopDesc::biased_lock_shift").longValue();
- ageShift = db.lookupLongConstant("markOopDesc::age_shift").longValue();
- hashShift = db.lookupLongConstant("markOopDesc::hash_shift").longValue();
- lockMask = db.lookupLongConstant("markOopDesc::lock_mask").longValue();
- lockMaskInPlace = db.lookupLongConstant("markOopDesc::lock_mask_in_place").longValue();
- biasedLockMask = db.lookupLongConstant("markOopDesc::biased_lock_mask").longValue();
- biasedLockMaskInPlace = db.lookupLongConstant("markOopDesc::biased_lock_mask_in_place").longValue();
- biasedLockBitInPlace = db.lookupLongConstant("markOopDesc::biased_lock_bit_in_place").longValue();
- ageMask = db.lookupLongConstant("markOopDesc::age_mask").longValue();
- ageMaskInPlace = db.lookupLongConstant("markOopDesc::age_mask_in_place").longValue();
- hashMask = db.lookupLongConstant("markOopDesc::hash_mask").longValue();
- hashMaskInPlace = db.lookupLongConstant("markOopDesc::hash_mask_in_place").longValue();
- biasedLockAlignment = db.lookupLongConstant("markOopDesc::biased_lock_alignment").longValue();
- lockedValue = db.lookupLongConstant("markOopDesc::locked_value").longValue();
- unlockedValue = db.lookupLongConstant("markOopDesc::unlocked_value").longValue();
- monitorValue = db.lookupLongConstant("markOopDesc::monitor_value").longValue();
- markedValue = db.lookupLongConstant("markOopDesc::marked_value").longValue();
- biasedLockPattern = db.lookupLongConstant("markOopDesc::biased_lock_pattern").longValue();
- noHash = db.lookupLongConstant("markOopDesc::no_hash").longValue();
- noHashInPlace = db.lookupLongConstant("markOopDesc::no_hash_in_place").longValue();
- noLockInPlace = db.lookupLongConstant("markOopDesc::no_lock_in_place").longValue();
- maxAge = db.lookupLongConstant("markOopDesc::max_age").longValue();
+ ageBits = db.lookupLongConstant("markWord::age_bits").longValue();
+ lockBits = db.lookupLongConstant("markWord::lock_bits").longValue();
+ biasedLockBits = db.lookupLongConstant("markWord::biased_lock_bits").longValue();
+ maxHashBits = db.lookupLongConstant("markWord::max_hash_bits").longValue();
+ hashBits = db.lookupLongConstant("markWord::hash_bits").longValue();
+ lockShift = db.lookupLongConstant("markWord::lock_shift").longValue();
+ biasedLockShift = db.lookupLongConstant("markWord::biased_lock_shift").longValue();
+ ageShift = db.lookupLongConstant("markWord::age_shift").longValue();
+ hashShift = db.lookupLongConstant("markWord::hash_shift").longValue();
+ lockMask = db.lookupLongConstant("markWord::lock_mask").longValue();
+ lockMaskInPlace = db.lookupLongConstant("markWord::lock_mask_in_place").longValue();
+ biasedLockMask = db.lookupLongConstant("markWord::biased_lock_mask").longValue();
+ biasedLockMaskInPlace = db.lookupLongConstant("markWord::biased_lock_mask_in_place").longValue();
+ biasedLockBitInPlace = db.lookupLongConstant("markWord::biased_lock_bit_in_place").longValue();
+ ageMask = db.lookupLongConstant("markWord::age_mask").longValue();
+ ageMaskInPlace = db.lookupLongConstant("markWord::age_mask_in_place").longValue();
+ hashMask = db.lookupLongConstant("markWord::hash_mask").longValue();
+ hashMaskInPlace = db.lookupLongConstant("markWord::hash_mask_in_place").longValue();
+ biasedLockAlignment = db.lookupLongConstant("markWord::biased_lock_alignment").longValue();
+ lockedValue = db.lookupLongConstant("markWord::locked_value").longValue();
+ unlockedValue = db.lookupLongConstant("markWord::unlocked_value").longValue();
+ monitorValue = db.lookupLongConstant("markWord::monitor_value").longValue();
+ markedValue = db.lookupLongConstant("markWord::marked_value").longValue();
+ biasedLockPattern = db.lookupLongConstant("markWord::biased_lock_pattern").longValue();
+ noHash = db.lookupLongConstant("markWord::no_hash").longValue();
+ noHashInPlace = db.lookupLongConstant("markWord::no_hash_in_place").longValue();
+ noLockInPlace = db.lookupLongConstant("markWord::no_lock_in_place").longValue();
+ maxAge = db.lookupLongConstant("markWord::max_age").longValue();
- /* Constants in markOop used by CMS. */
- cmsShift = db.lookupLongConstant("markOopDesc::cms_shift").longValue();
- cmsMask = db.lookupLongConstant("markOopDesc::cms_mask").longValue();
- sizeShift = db.lookupLongConstant("markOopDesc::size_shift").longValue();
+ /* Constants in markWord used by CMS. */
+ cmsShift = db.lookupLongConstant("markWord::cms_shift").longValue();
+ cmsMask = db.lookupLongConstant("markWord::cms_mask").longValue();
+ sizeShift = db.lookupLongConstant("markWord::size_shift").longValue();
}
// Field accessors
@@ -125,7 +119,7 @@
private static long maxAge;
- /* Constants in markOop used by CMS. */
+ /* Constants in markWord used by CMS. */
private static long cmsShift;
private static long cmsMask;
private static long sizeShift;
@@ -175,7 +169,7 @@
return (Bits.maskBitsLong(value(), lockMaskInPlace) == markedValue);
}
- // Special temporary state of the markOop while being inflated.
+ // Special temporary state of the markWord while being inflated.
// Code that looks at the mark outside a lock needs to take this into account.
public boolean isBeingInflated() {
return (value() == 0);
@@ -188,12 +182,8 @@
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
- // They must get updated if markOop layout get changed.
+  // They must get updated if the markWord layout gets changed.
- // FIXME
- // markOop set_unlocked() const {
- // return markOop(value() | unlocked_value);
- // }
public boolean hasLocker() {
return ((value() & lockMaskInPlace) == lockedValue);
}
@@ -224,44 +214,7 @@
Address addr = valueAsAddress().andWithMask(~monitorValue);
return new Mark(addr.getAddressAt(0));
}
- // FIXME
- // void set_displaced_mark_helper(markOop m) const {
- // assert(has_displaced_mark_helper(), "check");
- // intptr_t ptr = (value() & ~monitor_value);
- // *(markOop*)ptr = m;
- // }
- // markOop copy_set_hash(intptr_t hash) const {
- // intptr_t tmp = value() & (~hash_mask_in_place);
- // tmp |= ((hash & hash_mask) << hash_shift);
- // return (markOop)tmp;
- // }
- // it is only used to be stored into BasicLock as the
- // indicator that the lock is using heavyweight monitor
- // static markOop unused_mark() {
- // return (markOop) marked_value;
- // }
- // // the following two functions create the markOop to be
- // // stored into object header, it encodes monitor info
- // static markOop encode(BasicLock* lock) {
- // return (markOop) lock;
- // }
- // static markOop encode(ObjectMonitor* monitor) {
- // intptr_t tmp = (intptr_t) monitor;
- // return (markOop) (tmp | monitor_value);
- // }
- // used for alignment-based marking to reuse the busy state to encode pointers
- // (see markOop_alignment.hpp)
- // markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
- //
- // // age operations
- // markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
- //
public int age() { return (int) Bits.maskBitsLong(value() >> ageShift, ageMask); }
- // markOop set_age(int v) const {
- // assert((v & ~age_mask) == 0, "shouldn't overflow age field");
- // return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
- // }
- // markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
// hash operations
public long hash() {
@@ -272,12 +225,6 @@
return hash() == noHash;
}
- // FIXME
- // Prototype mark for initialization
- // static markOop prototype() {
- // return markOop( no_hash_in_place | no_lock_in_place );
- // }
-
// Debugging
public void printOn(PrintStream tty) {
if (isLocked()) {
@@ -294,14 +241,7 @@
}
}
- // FIXME
- // // Prepare address of oop for placement into mark
- // inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
- //
- // // Recover address of oop from encoded form used in mark
- // inline void* decode_pointer() { return clear_lock_bits(); }
-
- // Copy markOop methods for CMS here.
+ // Copy markWord methods for CMS here.
public boolean isCmsFreeChunk() {
return isUnlocked() &&
(Bits.maskBitsLong(value() >> cmsShift, cmsMask) & 0x1L) == 0x1L;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java Fri Aug 16 09:18:41 2019 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java Tue Aug 06 10:48:21 2019 +0200
@@ -64,7 +64,7 @@
}
// FIXME
- // void set_header(markOop hdr);
+ // void set_header(markWord hdr);
// FIXME: must implement and delegate to platform-dependent implementation
// public boolean isBusy();
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Fri Aug 16 09:18:41 2019 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Tue Aug 06 10:48:21 2019 +0200
@@ -73,7 +73,7 @@
final int hubOffset = getFieldOffset("oopDesc::_metadata._klass", Integer.class, "Klass*");
- final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markOop");
+ final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markWord");
final int subklassOffset = getFieldOffset("Klass::_subklass", Integer.class, "Klass*");
final int superOffset = getFieldOffset("Klass::_super", Integer.class, "Klass*");
final int nextSiblingOffset = getFieldOffset("Klass::_next_sibling", Integer.class, "Klass*");
@@ -142,11 +142,11 @@
// This is only valid on AMD64.
final int runtimeCallStackSize = getConstant("frame::arg_reg_save_area_bytes", Integer.class, osArch.equals("amd64") ? null : 0);
- private final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
- private final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+ private final int markWordNoHashInPlace = getConstant("markWord::no_hash_in_place", Integer.class);
+ private final int markWordNoLockInPlace = getConstant("markWord::no_lock_in_place", Integer.class);
/**
- * See {@code markOopDesc::prototype()}.
+ * See {@code markWord::prototype()}.
*/
long arrayPrototypeMarkWord() {
return markWordNoHashInPlace | markWordNoLockInPlace;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Fri Aug 16 09:18:41 2019 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Tue Aug 06 10:48:21 2019 +0200
@@ -208,10 +208,10 @@
public final int stackBias = getConstant("STACK_BIAS", Integer.class);
public final int vmPageSize = getFieldValue("CompilerToVM::Data::vm_page_size", Integer.class, "int");
- public final int markOffset = getFieldOffset("oopDesc::_mark", Integer.class, "markOop");
+ public final int markOffset = getFieldOffset("oopDesc::_mark", Integer.class, "markWord");
public final int hubOffset = getFieldOffset("oopDesc::_metadata._klass", Integer.class, "Klass*");
- public final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markOop");
+ public final int prototypeMarkWordOffset = getFieldOffset("Klass::_prototype_header", Integer.class, "markWord");
public final int subklassOffset = getFieldOffset("Klass::_subklass", Integer.class, "Klass*");
public final int nextSiblingOffset = getFieldOffset("Klass::_next_sibling", Integer.class, "Klass*");
public final int superCheckOffsetOffset = getFieldOffset("Klass::_super_check_offset", Integer.class, "juint");
@@ -445,17 +445,17 @@
public final int osThreadInterruptedOffset = getFieldOffset("OSThread::_interrupted", Integer.class, "jint");
- public final long markOopDescHashShift = getConstant("markOopDesc::hash_shift", Long.class);
+ public final long markWordHashShift = getConstant("markWord::hash_shift", Long.class);
- public final int biasedLockMaskInPlace = getConstant("markOopDesc::biased_lock_mask_in_place", Integer.class);
- public final int ageMaskInPlace = getConstant("markOopDesc::age_mask_in_place", Integer.class);
- public final int epochMaskInPlace = getConstant("markOopDesc::epoch_mask_in_place", Integer.class);
- public final long markOopDescHashMask = getConstant("markOopDesc::hash_mask", Long.class);
- public final long markOopDescHashMaskInPlace = getConstant("markOopDesc::hash_mask_in_place", Long.class);
+ public final int biasedLockMaskInPlace = getConstant("markWord::biased_lock_mask_in_place", Integer.class);
+ public final int ageMaskInPlace = getConstant("markWord::age_mask_in_place", Integer.class);
+ public final int epochMaskInPlace = getConstant("markWord::epoch_mask_in_place", Integer.class);
+ public final long markWordHashMask = getConstant("markWord::hash_mask", Long.class);
+ public final long markWordHashMaskInPlace = getConstant("markWord::hash_mask_in_place", Long.class);
- public final int unlockedMask = getConstant("markOopDesc::unlocked_value", Integer.class);
- public final int monitorMask = getConstant("markOopDesc::monitor_value", Integer.class, -1);
- public final int biasedLockPattern = getConstant("markOopDesc::biased_lock_pattern", Integer.class);
+ public final int unlockedMask = getConstant("markWord::unlocked_value", Integer.class);
+ public final int monitorMask = getConstant("markWord::monitor_value", Integer.class, -1);
+ public final int biasedLockPattern = getConstant("markWord::biased_lock_pattern", Integer.class);
// This field has no type in vmStructs.cpp
public final int objectMonitorOwner = getFieldOffset("ObjectMonitor::_owner", Integer.class, null, -1);
@@ -464,34 +464,34 @@
public final int objectMonitorEntryList = getFieldOffset("ObjectMonitor::_EntryList", Integer.class, "ObjectWaiter*", -1);
public final int objectMonitorSucc = getFieldOffset("ObjectMonitor::_succ", Integer.class, "Thread*", -1);
- public final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
- public final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+ public final int markWordNoHashInPlace = getConstant("markWord::no_hash_in_place", Integer.class);
+ public final int markWordNoLockInPlace = getConstant("markWord::no_lock_in_place", Integer.class);
/**
- * See {@code markOopDesc::prototype()}.
+ * See {@code markWord::prototype()}.
*/
public long arrayPrototypeMarkWord() {
return markWordNoHashInPlace | markWordNoLockInPlace;
}
/**
- * See {@code markOopDesc::copy_set_hash()}.
+ * See {@code markWord::copy_set_hash()}.
*/
public long tlabIntArrayMarkWord() {
- long tmp = arrayPrototypeMarkWord() & (~markOopDescHashMaskInPlace);
- tmp |= ((0x2 & markOopDescHashMask) << markOopDescHashShift);
+ long tmp = arrayPrototypeMarkWord() & (~markWordHashMaskInPlace);
+ tmp |= ((0x2 & markWordHashMask) << markWordHashShift);
return tmp;
}
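For reference, the Java arithmetic above mirrors the C++ copy_set_hash. A sketch reconstructed from the commented-out version removed from Mark.java earlier in this patch; the LP64 layout values are assumptions consistent with the hash_mask_in_place value (549755813632) checked in ClhsdbLongConstant.java below:

    #include <cstdint>

    // Illustrative LP64 hash-field layout (assumed values).
    const int       hash_shift         = 8;
    const uintptr_t hash_mask          = (uintptr_t(1) << 31) - 1;
    const uintptr_t hash_mask_in_place = hash_mask << hash_shift;

    // Merge a hash into a header word without disturbing the other bits.
    uintptr_t copy_set_hash(uintptr_t mark, intptr_t hash) {
      uintptr_t tmp = mark & ~hash_mask_in_place;                    // clear old hash
      tmp |= (uintptr_t)(hash & (intptr_t)hash_mask) << hash_shift;  // splice in new
      return tmp;
    }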
/**
* Mark word right shift to get identity hash code.
*/
- public final int identityHashCodeShift = getConstant("markOopDesc::hash_shift", Integer.class);
+ public final int identityHashCodeShift = getConstant("markWord::hash_shift", Integer.class);
/**
* Identity hash code value when uninitialized.
*/
- public final int uninitializedIdentityHashCodeValue = getConstant("markOopDesc::no_hash", Integer.class);
+ public final int uninitializedIdentityHashCodeValue = getConstant("markWord::no_hash", Integer.class);
public final int methodAccessFlagsOffset = getFieldOffset("Method::_access_flags", Integer.class, "AccessFlags");
public final int methodConstMethodOffset = getFieldOffset("Method::_constMethod", Integer.class, "ConstMethod*");
@@ -565,7 +565,7 @@
public final int arrayKlassOffset = getFieldValue("java_lang_Class::_array_klass_offset", Integer.class, "int");
public final int basicLockSize = getFieldValue("CompilerToVM::Data::sizeof_BasicLock", Integer.class, "int");
- public final int basicLockDisplacedHeaderOffset = getFieldOffset("BasicLock::_displaced_header", Integer.class, "markOop");
+ public final int basicLockDisplacedHeaderOffset = getFieldOffset("BasicLock::_displaced_header", Integer.class, "markWord");
public final int threadPollingPageOffset = getFieldOffset("Thread::_polling_page", Integer.class, "address", -1);
public final int threadAllocatedBytesOffset = getFieldOffset("Thread::_allocated_bytes", Integer.class, "jlong");
--- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -42,17 +42,19 @@
FakeOop() : _oop() { _oop.set_mark_raw(originalMark()); }
oop get_oop() { return &_oop; }
- markOop mark() { return _oop.mark_raw(); }
- void set_mark(markOop m) { _oop.set_mark_raw(m); }
+ markWord mark() { return _oop.mark_raw(); }
+ void set_mark(markWord m) { _oop.set_mark_raw(m); }
void forward_to(oop obj) {
- markOop m = markOopDesc::encode_pointer_as_mark(obj);
+ markWord m = markWord::encode_pointer_as_mark(obj);
_oop.set_mark_raw(m);
}
- static markOop originalMark() { return markOop(markOopDesc::lock_mask_in_place); }
- static markOop changedMark() { return markOop(0x4711); }
+ static markWord originalMark() { return markWord(markWord::lock_mask_in_place); }
+ static markWord changedMark() { return markWord(0x4711); }
};
+#define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value())
+
TEST_VM(PreservedMarks, iterate_and_restore) {
// Need to disable biased locking to easily
// create oops that "must_be_preserved"
@@ -65,16 +67,16 @@
FakeOop o4;
// Make sure initial marks are correct.
- ASSERT_EQ(o1.mark(), FakeOop::originalMark());
- ASSERT_EQ(o2.mark(), FakeOop::originalMark());
- ASSERT_EQ(o3.mark(), FakeOop::originalMark());
- ASSERT_EQ(o4.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::originalMark());
+ ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::originalMark());
// Change the marks and verify change.
o1.set_mark(FakeOop::changedMark());
o2.set_mark(FakeOop::changedMark());
- ASSERT_EQ(o1.mark(), FakeOop::changedMark());
- ASSERT_EQ(o2.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::changedMark());
// Push o1 and o2 to have their marks preserved.
pm.push(o1.get_oop(), o1.mark());
@@ -92,6 +94,6 @@
// Restore all preserved and verify that the changed
// mark is now present at o3 and o4.
pm.restore();
- ASSERT_EQ(o3.mark(), FakeOop::changedMark());
- ASSERT_EQ(o4.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::changedMark());
+ ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::changedMark());
}
--- a/test/hotspot/gtest/oops/test_markOop.cpp Fri Aug 16 09:18:41 2019 +0200
+++ b/test/hotspot/gtest/oops/test_markOop.cpp Tue Aug 06 10:48:21 2019 +0200
@@ -79,7 +79,7 @@
};
-TEST_VM(markOopDesc, printing) {
+TEST_VM(markWord, printing) {
JavaThread* THREAD = JavaThread::current();
ThreadInVMfromNative invm(THREAD);
ResourceMark rm(THREAD);
@@ -98,10 +98,10 @@
// Lock using biased locking.
BasicObjectLock lock;
lock.set_obj(obj);
- markOop mark = obj->mark()->incr_bias_epoch();
+ markWord mark = obj->mark().incr_bias_epoch();
obj->set_mark(mark);
ObjectSynchronizer::fast_enter(h_obj, lock.lock(), true, THREAD);
- // Look for the biased_locker in markOop, not prototype_header.
+ // Look for the biased_locker in markWord, not prototype_header.
#ifdef _LP64
assert_not_test_pattern(h_obj, "mark(is_biased biased_locker=0x0000000000000000");
#else
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java Fri Aug 16 09:18:41 2019 +0200
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java Tue Aug 06 10:48:21 2019 +0200
@@ -56,7 +56,7 @@
"detach",
"universe",
"reattach",
- "longConstant markOopDesc::locked_value");
+ "longConstant markWord::locked_value");
Map<String, List<String>> expStrMap = new HashMap<>();
expStrMap.put("where", List.of(
@@ -65,8 +65,8 @@
"MaxJavaStackTraceDepth = "));
expStrMap.put("universe", List.of(
"Command not valid until attached to a VM"));
- expStrMap.put("longConstant markOopDesc::locked_value", List.of(
- "longConstant markOopDesc::locked_value"));
+ expStrMap.put("longConstant markWord::locked_value", List.of(
+ "longConstant markWord::locked_value"));
test.run(-1, cmds, expStrMap, null);
} catch (SkippedException se) {
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Fri Aug 16 09:18:41 2019 +0200
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Tue Aug 06 10:48:21 2019 +0200
@@ -51,21 +51,21 @@
List<String> cmds = List.of(
"longConstant",
- "longConstant markOopDesc::locked_value",
- "longConstant markOopDesc::lock_bits",
+ "longConstant markWord::locked_value",
+ "longConstant markWord::lock_bits",
"longConstant jtreg::test 6",
"longConstant jtreg::test");
Map<String, List<String>> expStrMap = new HashMap<>();
expStrMap.put("longConstant", List.of(
- "longConstant markOopDesc::locked_value",
- "longConstant markOopDesc::lock_bits",
+ "longConstant markWord::locked_value",
+ "longConstant markWord::lock_bits",
"InvocationCounter::count_increment",
- "markOopDesc::epoch_mask_in_place"));
- expStrMap.put("longConstant markOopDesc::locked_value", List.of(
- "longConstant markOopDesc::locked_value"));
- expStrMap.put("longConstant markOopDesc::lock_bits", List.of(
- "longConstant markOopDesc::lock_bits"));
+ "markWord::epoch_mask_in_place"));
+ expStrMap.put("longConstant markWord::locked_value", List.of(
+ "longConstant markWord::locked_value"));
+ expStrMap.put("longConstant markWord::lock_bits", List.of(
+ "longConstant markWord::lock_bits"));
expStrMap.put("longConstant jtreg::test", List.of(
"longConstant jtreg::test 6"));
@@ -93,12 +93,12 @@
// Expected output snippet is of the form (on x86-64):
// ...
// longConstant VM_Version::CPU_SHA 17179869184
- // longConstant markOopDesc::biased_lock_bits 1
- // longConstant markOopDesc::age_shift 3
- // longConstant markOopDesc::hash_mask_in_place 549755813632
+ // longConstant markWord::biased_lock_bits 1
+ // longConstant markWord::age_shift 3
+ // longConstant markWord::hash_mask_in_place 549755813632
// ...
- checkLongValue("markOopDesc::hash_mask_in_place",
+ checkLongValue("markWord::hash_mask_in_place",
longConstantOutput,
Platform.is64bit() ? 549755813632L: 4294967168L);