6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
Summary: decouple set_klass() from zeroing the klass gap when compressed oops are used.
Reviewed-by: kvn, ysr, jrose
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Wed May 28 21:06:24 2008 -0700
@@ -3421,13 +3421,15 @@
set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
// set klass to intArrayKlass
- set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
- ld_ptr(t2, 0, t2);
- store_klass(t2, top);
sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
st(t1, top, arrayOopDesc::length_offset_in_bytes());
+ set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
+ ld_ptr(t2, 0, t2);
+ // Store klass last: concurrent GCs assume the array length is valid if
+ // the klass field is not null.
+ store_klass(t2, top);
verify_oop(top);
// refill the tlab with an eden allocation
@@ -3568,13 +3570,19 @@
if (UseCompressedOops) {
assert(dst_oop != klass, "not enough registers");
encode_heap_oop_not_null(klass);
- sllx(klass, BitsPerInt, klass);
- stx(klass, dst_oop, oopDesc::klass_offset_in_bytes());
+ st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
} else {
st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
}
}
+void MacroAssembler::store_klass_gap(Register s, Register d) {
+ if (UseCompressedOops) {
+ assert(s != d, "not enough registers");
+ st(s, d, oopDesc::klass_gap_offset_in_bytes());
+ }
+}
+
void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
if (UseCompressedOops) {
lduw(s, d, offset);
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Wed May 28 21:06:24 2008 -0700
@@ -1979,6 +1979,7 @@
// klass oop manipulations if compressed
void load_klass(Register src_oop, Register klass);
void store_klass(Register klass, Register dst_oop);
+ void store_klass_gap(Register s, Register dst_oop);
// oop manipulations
void load_heap_oop(const Address& s, Register d, int offset = 0);
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Wed May 28 21:06:24 2008 -0700
@@ -3222,7 +3222,8 @@
__ set((intptr_t)markOopDesc::prototype(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
- __ store_klass(RinstanceKlass, RallocatedObject); // klass
+ __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
+ __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
{
SkipIfEqual skip_if(
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp Wed May 28 21:06:24 2008 -0700
@@ -4935,6 +4935,8 @@
movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
// set klass to intArrayKlass
movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
+ // Store klass last: concurrent GCs assume the array length is valid if
+ // the klass field is not null.
store_klass(top, t1);
// refill the tlab with an eden allocation
@@ -5159,9 +5161,17 @@
void MacroAssembler::store_klass(Register dst, Register src) {
if (UseCompressedOops) {
encode_heap_oop_not_null(src);
- // Store to the wide klass field to zero the gap.
+ movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+ } else {
+ movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
- movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+}
+
+void MacroAssembler::store_klass_gap(Register dst, Register src) {
+ if (UseCompressedOops) {
+ // Store to klass gap in destination
+ movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
+ }
}
void MacroAssembler::load_heap_oop(Register dst, Address src) {
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp Wed May 28 21:06:24 2008 -0700
@@ -1109,6 +1109,7 @@
// oop manipulations
void load_klass(Register dst, Register src);
void store_klass(Register dst, Register src);
+ void store_klass_gap(Register dst, Register src);
void load_heap_oop(Register dst, Address src);
void store_heap_oop(Address dst, Register src);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Wed May 28 21:06:24 2008 -0700
@@ -3163,7 +3163,9 @@
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
(intptr_t) markOopDesc::prototype()); // header (address 0x1)
}
- __ store_klass(rax, rsi); // klass
+ __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
+ __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
+ __ store_klass(rax, rsi); // store klass last
__ jmp(done);
}
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed May 28 21:06:24 2008 -0700
@@ -1169,18 +1169,18 @@
// Trim off a prefix of at most objsFromOverflow items
int i = 1;
oop cur = prefix;
- while (i < objsFromOverflow && cur->klass() != NULL) {
+ while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
i++; cur = oop(cur->klass());
}
// Reattach remaining (suffix) to overflow list
- if (cur->klass() != NULL) {
+ if (cur->klass_or_null() != NULL) {
oop suffix = oop(cur->klass());
cur->set_klass_to_list_ptr(NULL);
// Find last item of suffix list
oop last = suffix;
- while (last->klass() != NULL) {
+ while (last->klass_or_null() != NULL) {
last = oop(last->klass());
}
// Atomically prepend suffix to current overflow list
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Wed May 28 21:06:24 2008 -0700
@@ -90,11 +90,12 @@
HeapWord* obj,
size_t size,
int length) {
+ // Set array length before setting the _klass field
+ // in post_allocation_setup_common() because the klass field
+ // indicates that the object is parsable by concurrent GC.
assert(length >= 0, "length should be non-negative");
+ ((arrayOop)obj)->set_length(length);
post_allocation_setup_common(klass, obj, size);
- // Must set length after installing klass as set_klass zeros the length
- // field in UseCompressedOops
- ((arrayOop)obj)->set_length(length);
assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace)
post_allocation_notify(klass, (oop)obj);
@@ -224,6 +225,7 @@
assert(obj != NULL, "cannot initialize NULL object");
const size_t hs = oopDesc::header_size();
assert(size >= hs, "unexpected object size");
+ ((oop)obj)->set_klass_gap(0);
Copy::fill_to_aligned_words(obj + hs, size - hs);
}
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed May 28 21:06:24 2008 -0700
@@ -1931,6 +1931,7 @@
} else {
result->set_mark(markOopDesc::prototype());
}
+ result->set_klass_gap(0);
result->set_klass(k_entry);
SET_STACK_OBJECT(result, 0);
UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
--- a/hotspot/src/share/vm/memory/space.cpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp Wed May 28 21:06:24 2008 -0700
@@ -815,6 +815,7 @@
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark(markOopDesc::prototype());
+ obj->set_klass_gap(0);
obj->set_klass(SystemDictionary::object_klass());
}
}
--- a/hotspot/src/share/vm/oops/oop.hpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp Wed May 28 21:06:24 2008 -0700
@@ -77,10 +77,15 @@
void init_mark();
klassOop klass() const;
+ klassOop klass_or_null() const volatile;
oop* klass_addr();
narrowOop* compressed_klass_addr();
void set_klass(klassOop k);
+
+ // For klass field compression
+ int klass_gap() const;
+ void set_klass_gap(int z);
// For when the klass pointer is being used as a linked list "next" field.
void set_klass_to_list_ptr(oop k);
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Wed May 28 12:42:34 2008 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Wed May 28 21:06:24 2008 -0700
@@ -36,7 +36,15 @@
inline klassOop oopDesc::klass() const {
if (UseCompressedOops) {
return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
- // can be NULL in CMS, but isn't supported on CMS yet.
+ } else {
+ return _metadata._klass;
+ }
+}
+
+inline klassOop oopDesc::klass_or_null() const volatile {
+ // can be NULL in CMS
+ if (UseCompressedOops) {
+ return (klassOop)decode_heap_oop(_metadata._compressed_klass);
} else {
return _metadata._klass;
}
@@ -64,15 +72,22 @@
assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
if (UseCompressedOops) {
- // zero the gap when the klass is set, by zeroing the pointer sized
- // part of the union.
- _metadata._klass = NULL;
oop_store_without_check(compressed_klass_addr(), (oop)k);
} else {
oop_store_without_check(klass_addr(), (oop) k);
}
}
+inline int oopDesc::klass_gap() const {
+ return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
+}
+
+inline void oopDesc::set_klass_gap(int v) {
+ if (UseCompressedOops) {
+ *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
+ }
+}
+
inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
@@ -505,7 +520,7 @@
// try to find metaclass cycle safely without seg faulting on bad input
// we should reach klassKlassObj by following klass link at most 3 times
for (int i = 0; i < 3; i++) {
- obj = obj->klass();
+ obj = obj->klass_or_null();
// klass should be aligned and in permspace
if (!check_obj_alignment(obj)) return false;
if (!Universe::heap()->is_in_permanent(obj)) return false;