--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Thu Sep 19 09:26:08 2013 +0200
@@ -81,7 +81,7 @@
public Address getCompKlassAddressAt(long offset)
throws UnalignedAddressException, UnmappedAddressException {
- return debugger.readCompOopAddress(addr + offset);
+ return debugger.readCompKlassAddress(addr + offset);
}
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Sep 19 09:26:08 2013 +0200
@@ -792,7 +792,7 @@
public boolean isCompressedKlassPointersEnabled() {
if (compressedKlassPointersEnabled == null) {
- Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+ Flag flag = getCommandLineFlag("UseCompressedClassPointers");
compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
(flag.getBool()? Boolean.TRUE: Boolean.FALSE);
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Sep 19 09:26:08 2013 +0200
@@ -66,18 +66,18 @@
printGCAlgorithm(flagMap);
System.out.println();
System.out.println("Heap Configuration:");
- printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap));
- printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap));
- printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap));
- printValMB("NewSize = ", getFlagValue("NewSize", flagMap));
- printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap));
- printValMB("OldSize = ", getFlagValue("OldSize", flagMap));
- printValue("NewRatio = ", getFlagValue("NewRatio", flagMap));
- printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
- printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap));
- printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
- printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap));
- printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
+ printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap));
+ printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap));
+ printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap));
+ printValMB("NewSize = ", getFlagValue("NewSize", flagMap));
+ printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap));
+ printValMB("OldSize = ", getFlagValue("OldSize", flagMap));
+ printValue("NewRatio = ", getFlagValue("NewRatio", flagMap));
+ printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
+ printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap));
+ printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
+ printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap));
+ printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
System.out.println();
System.out.println("Heap Usage:");
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -105,7 +105,7 @@
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
}
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
}
@@ -963,7 +963,7 @@
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS:
#ifdef _LP64
- if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+ if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register());
} else
@@ -2208,7 +2208,7 @@
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// We don't need decode because we just need to compare
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@@ -2342,7 +2342,7 @@
// but not necessarily exactly of type default_type.
Label known_ok, halt;
metadata2reg(op->expected_type()->constant_encoding(), tmp);
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// tmp holds the default type. It currently comes uncompressed after the
// load of a constant, so encode it.
__ encode_klass_not_null(tmp);
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -186,7 +186,7 @@
set((intx)markOopDesc::prototype(), t1);
}
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// Save klass
mov(klass, t1);
encode_klass_not_null(t1);
@@ -196,7 +196,7 @@
}
if (len->is_valid()) {
st(len, obj, arrayOopDesc::length_offset_in_bytes());
- } else if (UseCompressedKlassPointers) {
+ } else if (UseCompressedClassPointers) {
// otherwise length is in the class gap
store_klass_gap(G0, obj);
}
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -3911,7 +3911,7 @@
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
// if this changes, change that.
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
decode_klass_not_null(klass);
} else {
@@ -3920,7 +3920,7 @@
}
void MacroAssembler::store_klass(Register klass, Register dst_oop) {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
assert(dst_oop != klass, "not enough registers");
encode_klass_not_null(klass);
st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -3930,7 +3930,7 @@
}
void MacroAssembler::store_klass_gap(Register s, Register d) {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
assert(s != d, "not enough registers");
st(s, d, oopDesc::klass_gap_offset_in_bytes());
}
@@ -4089,7 +4089,7 @@
}
void MacroAssembler::encode_klass_not_null(Register r) {
- assert (UseCompressedKlassPointers, "must be compressed");
+ assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4105,7 +4105,7 @@
if (src == dst) {
encode_klass_not_null(src);
} else {
- assert (UseCompressedKlassPointers, "must be compressed");
+ assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
set((intptr_t)Universe::narrow_klass_base(), dst);
sub(src, dst, dst);
@@ -4119,7 +4119,7 @@
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
- assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+ assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// set + add + set
int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
@@ -4135,7 +4135,7 @@
void MacroAssembler::decode_klass_not_null(Register r) {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
- assert (UseCompressedKlassPointers, "must be compressed");
+ assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4151,7 +4151,7 @@
} else {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
- assert (UseCompressedKlassPointers, "must be compressed");
+ assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
if (Universe::narrow_klass_shift() != 0) {
assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
@@ -4167,7 +4167,7 @@
}
void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops || UseCompressedKlassPointers) {
+ if (UseCompressedOops || UseCompressedClassPointers) {
if (Universe::heap() != NULL) {
set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
} else {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Thu Sep 19 09:26:08 2013 +0200
@@ -557,7 +557,7 @@
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int klass_load_size;
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
} else {
@@ -1657,7 +1657,7 @@
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
@@ -1897,7 +1897,7 @@
bool Matcher::narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
- assert(UseCompressedKlassPointers, "only for compressed klass code");
+ assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}
@@ -2561,7 +2561,7 @@
int off = __ offset();
__ load_klass(O0, G3_scratch);
int klass_load_size;
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
} else {
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2945,7 +2945,7 @@
BLOCK_COMMENT("arraycopy argument klass checks");
// get src->klass()
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ delayed()->nop(); // ??? not good
__ load_klass(src, G3_src_klass);
} else {
@@ -2980,7 +2980,7 @@
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ load_klass(dst, G4_dst_klass);
}
// Handle objArrays completely differently...
@@ -2988,7 +2988,7 @@
__ set(objArray_lh, O5_temp);
__ cmp(G5_lh, O5_temp);
__ br(Assembler::equal, false, Assembler::pt, L_objArray);
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ delayed()->nop();
} else {
__ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -218,13 +218,13 @@
// ld;ld;ld,jmp,nop
const int basic = 5*BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
- (UseCompressedKlassPointers ?
+ (UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return basic + slop;
} else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
- (UseCompressedKlassPointers ?
+ (UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return (basic + slop);
}
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -148,7 +148,7 @@
static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free r12) in case of compressed oops
- if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
+ if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
return range;
}
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -341,7 +341,7 @@
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
+ const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -1263,7 +1263,7 @@
break;
case T_ADDRESS:
- if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+ if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
__ movl(dest->as_register(), from_addr);
} else {
__ movptr(dest->as_register(), from_addr);
@@ -1371,7 +1371,7 @@
__ verify_oop(dest->as_register());
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
}
#endif
@@ -1716,7 +1716,7 @@
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
- if (k->is_loaded() && !UseCompressedKlassPointers) {
+ if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
@@ -1754,7 +1754,7 @@
// get object class
// not a safepoint as obj null check happens earlier
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj);
__ cmpptr(k_RInfo, Rtmp1);
} else {
@@ -3294,7 +3294,7 @@
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ movl(tmp, src_klass_addr);
__ cmpl(tmp, dst_klass_addr);
} else {
@@ -3456,21 +3456,21 @@
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
}
#endif
if (basic_type != T_OBJECT) {
- if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
+ if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, halt);
- if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
+ if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
else __ cmpptr(tmp, src_klass_addr);
__ jcc(Assembler::equal, known_ok);
} else {
- if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
+ if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::equal, known_ok);
__ cmpptr(src, dst);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -1239,7 +1239,7 @@
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
- if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+ if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ checkcast(reg, obj.result(), x->klass(),
@@ -1261,7 +1261,7 @@
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
- if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+ if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -157,7 +157,7 @@
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
}
#ifdef _LP64
- if (UseCompressedKlassPointers) { // Take care not to kill klass
+ if (UseCompressedClassPointers) { // Take care not to kill klass
movptr(t1, klass);
encode_klass_not_null(t1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
}
#ifdef _LP64
- else if (UseCompressedKlassPointers) {
+ else if (UseCompressedClassPointers) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}
@@ -334,7 +334,7 @@
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver);
cmpptr(rscratch1, iCache);
} else {
@@ -345,7 +345,7 @@
jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+ assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -1635,7 +1635,7 @@
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// r12 is the heapbase.
- LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+ LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@@ -4802,7 +4802,7 @@
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else
@@ -4817,7 +4817,7 @@
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
encode_klass_not_null(src);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else
@@ -4892,7 +4892,7 @@
#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// Store to klass gap in destination
movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
}
@@ -5075,7 +5075,7 @@
// when (Universe::heap() != NULL). Hence, if the instructions they
// generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
- assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+ assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
}
@@ -5085,7 +5085,7 @@
void MacroAssembler::decode_klass_not_null(Register r) {
// Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert(r != r12_heapbase, "Decoding a klass in r12");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
@@ -5103,7 +5103,7 @@
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
// Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
if (dst == src) {
decode_klass_not_null(dst);
} else {
@@ -5141,7 +5141,7 @@
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5149,7 +5149,7 @@
}
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5175,7 +5175,7 @@
}
void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5183,7 +5183,7 @@
}
void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5191,7 +5191,7 @@
}
void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops || UseCompressedKlassPointers) {
+ if (UseCompressedOops || UseCompressedClassPointers) {
if (Universe::heap() != NULL) {
if (Universe::narrow_oop_base() == NULL) {
MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -211,11 +211,11 @@
if (is_vtable_stub) {
// Vtable stub size
return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
- (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+ (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
} else {
// Itable stub size
return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
- (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+ (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
}
// In order to tune these parameters, run the JVM with VM options
// +PrintMiscellaneous and +WizardMode to see information about
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Thu Sep 19 09:26:08 2013 +0200
@@ -1391,7 +1391,7 @@
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
@@ -1408,7 +1408,7 @@
{
MacroAssembler masm(&cbuf);
uint insts_size = cbuf.insts_size();
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1);
} else {
@@ -1557,7 +1557,7 @@
}
bool Matcher::narrow_klass_use_complex_address() {
- assert(UseCompressedKlassPointers, "only for compressed klass code");
+ assert(UseCompressedClassPointers, "only for compressed klass code");
return (LogKlassAlignmentInBytes <= 3);
}
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -3589,8 +3589,6 @@
#endif
}
- os::large_page_init();
-
// initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) {
perror("SR_initialize failed");
--- a/hotspot/src/os/linux/vm/os_linux.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -4805,8 +4805,6 @@
#endif
}
- os::large_page_init();
-
// initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) {
perror("SR_initialize failed");
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -5178,9 +5178,7 @@
if(Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
-}
-
- os::large_page_init();
+ }
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
--- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -3189,9 +3189,12 @@
return p_buf;
} else {
+ if (TracePageSizes && Verbose) {
+ tty->print_cr("Reserving large pages in a single large chunk.");
+ }
// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
- char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+ char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
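+ // A non-NULL addr requests the reservation at exactly that address; VirtualAlloc
+ // returns NULL if the range cannot be reserved there.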
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
@@ -3917,8 +3920,6 @@
#endif
}
- os::large_page_init();
-
// Setup Windows Exceptions
// for debugging float code generation bugs
@@ -5714,7 +5715,66 @@
#endif
#ifndef PRODUCT
+
+// Test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first finds a good approximate address by using the same method to allocate
+// some memory at an OS-selected address. It then tries to allocate memory in the vicinity
+// (not directly after it, to avoid a by-chance reuse of that exact location).
+// This is only a rough heuristic; there is no guarantee that the vicinity of the
+// previously allocated memory is actually available for allocation. The only failure
+// that is reported is when the test asks for a particular location but gets a different
+// valid one. A NULL return value at this point is not considered an error and may be
+// legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
- // No tests available for this platform
-}
-#endif
+ if (!UseLargePages) {
+ if (VerboseInternalVMTests) {
+ gclog_or_tty->print("Skipping test because large pages are disabled");
+ }
+ return;
+ }
+ // save current value of globals
+ bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+ bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+ // set globals to make sure we hit the correct code path
+ UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+ // do an allocation at an address selected by the OS to get a good one.
+ const size_t large_allocation_size = os::large_page_size() * 4;
+ char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
+ if (result == NULL) {
+ if (VerboseInternalVMTests) {
+ gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
+ large_allocation_size);
+ }
+ } else {
+ os::release_memory_special(result, large_allocation_size);
+
+ // Allocate another page within the recently allocated memory area; it seems to be a
+ // good location, since we managed to get one there at least once.
+ const size_t expected_allocation_size = os::large_page_size();
+ char* expected_location = result + os::large_page_size();
+ char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
+ if (actual_location == NULL) {
+ if (VerboseInternalVMTests) {
+ gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
+ expected_location, expected_allocation_size);
+ }
+ } else {
+ // release memory
+ os::release_memory_special(actual_location, expected_allocation_size);
+ // Check only now, after releasing the memory, so that a failing assert does not leak it.
+ assert(actual_location == expected_location,
+ err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+ expected_location, expected_allocation_size, actual_location));
+ }
+ }
+
+ // restore globals
+ UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
+ UseNUMAInterleaving = old_use_numa_interleaving;
+}
+#endif // PRODUCT
+
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -35,7 +35,9 @@
// Used on 64 bit platforms for UseCompressedOops base address
#ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
+// Use 6G as the default base address because by default the OS maps the application
+// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
+define_pd_global(uintx, HeapBaseMinAddress, CONST64(6)*G);
#else
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -230,7 +230,7 @@
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
- assert(UseCompressedKlassPointers ||
+ assert(UseCompressedClassPointers ||
junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
@@ -1407,7 +1407,7 @@
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// Copy gap missed by (aligned) header size calculation below
obj->set_klass_gap(old->klass_gap());
}
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -481,9 +481,8 @@
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_g1h(g1h),
- _markBitMap1(MinObjAlignment - 1),
- _markBitMap2(MinObjAlignment - 1),
-
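+ // The bitmap shifter must be log2 of the object alignment (in heap words);
+ // MinObjAlignment - 1 coincides with it only for alignments of 1 or 2 words.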
+ _markBitMap1(log2_intptr(MinObjAlignment)),
+ _markBitMap2(log2_intptr(MinObjAlignment)),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
_sleep_factor(0.0),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -33,8 +33,8 @@
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
if (has_count_table()) {
- check_card_num(from_card_num,
- err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+ assert(from_card_num < _committed_max_card_num,
+ err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
assert(from_card_num < to_card_num,
err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
from_card_num, to_card_num));
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -72,25 +72,21 @@
return has_reserved_count_table() && _committed_max_card_num > 0;
}
- void check_card_num(size_t card_num, const char* msg) {
- assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
- }
-
size_t ptr_2_card_num(const jbyte* card_ptr) {
assert(card_ptr >= _ct_bot,
- err_msg("Inavalied card pointer: "
+ err_msg("Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
card_ptr, _ct_bot));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
- check_card_num(card_num,
- err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+ assert(card_num < _committed_max_card_num,
+ err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
- check_card_num(card_num,
- err_msg("card num out of range: "SIZE_FORMAT, card_num));
+ assert(card_num < _committed_max_card_num,
+ err_msg("card num out of range: "SIZE_FORMAT, card_num));
return (jbyte*) (_ct_bot + card_num);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2191,6 +2191,10 @@
return JNI_OK;
}
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+ return HeapRegion::max_region_size();
+}
+
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -1092,6 +1092,9 @@
// specified by the policy object.
jint initialize();
+ // Return the (conservative) maximum heap alignment for any G1 heap
+ static size_t conservative_max_heap_alignment();
+
// Initialize weak reference processing.
virtual void ref_processing_init();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,10 @@
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
+size_t HeapRegion::max_region_size() {
+ return (size_t)MAX_REGION_SIZE;
+}
+
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -355,6 +355,8 @@
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
+ static size_t max_region_size();
+
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
// size (LogOfHRGrainBytes / LogOfHRGrainWords /
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,9 +68,6 @@
size_t min_old_gen_size() { return _min_gen1_size; }
size_t old_gen_size() { return _initial_gen1_size; }
size_t max_old_gen_size() { return _max_gen1_size; }
-
- size_t metaspace_size() { return MetaspaceSize; }
- size_t max_metaspace_size() { return MaxMetaspaceSize; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -86,6 +86,11 @@
set_alignment(_old_gen_alignment, intra_heap_alignment());
}
+ // Return the (conservative) maximum heap alignment
+ static size_t conservative_max_heap_alignment() {
+ return intra_heap_alignment();
+ }
+
// For use by VM operations
enum CollectionType {
Scavenge,
@@ -122,7 +127,7 @@
// The alignment used for eden and survivors within the young gen
// and for boundary between young gen and old gen.
- size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
+ static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
size_t capacity() const;
size_t used() const;
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -87,15 +87,15 @@
const MetaspaceSizes meta_space(
MetaspaceAux::allocated_capacity_bytes(),
MetaspaceAux::allocated_used_bytes(),
- MetaspaceAux::reserved_in_bytes());
+ MetaspaceAux::reserved_bytes());
const MetaspaceSizes data_space(
MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
- MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
+ MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
- MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
+ MetaspaceAux::reserved_bytes(Metaspace::ClassType));
return MetaspaceSummary(meta_space, data_space, class_space);
}
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -47,6 +47,11 @@
// CollectorPolicy methods.
+// Align down. If the result of the alignment is 0, return 'alignment'.
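+// For example, with alignment 4*K: restricted_align_down(10*K, 4*K) returns 8*K,
+// while restricted_align_down(1*K, 4*K) would align down to 0 and therefore returns 4*K.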
+static size_t restricted_align_down(size_t size, size_t alignment) {
+ return MAX2(alignment, align_size_down_(size, alignment));
+}
+
void CollectorPolicy::initialize_flags() {
assert(max_alignment() >= min_alignment(),
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
@@ -59,18 +64,24 @@
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
- if (MetaspaceSize > MaxMetaspaceSize) {
- MaxMetaspaceSize = MetaspaceSize;
+ if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
+ FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
+ restricted_align_down(MaxMetaspaceSize, max_alignment()));
}
- MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
- // Don't increase Metaspace size limit above specified.
- MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
+
if (MetaspaceSize > MaxMetaspaceSize) {
- MetaspaceSize = MaxMetaspaceSize;
+ FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
}
- MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
- MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
+ if (!is_size_aligned(MetaspaceSize, min_alignment())) {
+ FLAG_SET_ERGO(uintx, MetaspaceSize,
+ restricted_align_down(MetaspaceSize, min_alignment()));
+ }
+
+ assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
+
+ MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
+ MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
@@ -145,6 +156,30 @@
_all_soft_refs_clear = true;
}
+size_t CollectorPolicy::compute_max_alignment() {
+ // The card marking array and the offset arrays for old generations are
+ // committed in os pages as well. Make sure they are entirely full (to
+ // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
+ // byte entry and the os page size is 4096, the maximum heap size should
+ // be 512*4096 = 2MB aligned.
+
+ // HotSpot currently has only one remembered set implementation,
+ // GenRemSet::CardTable. The requirements of any new remembered set
+ // implementation must be added here.
+ size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+
+ // Parallel GC does its own alignment of the generations to avoid requiring a
+ // large page (256M on some platforms) for the permanent generation. The
+ // other collectors should also be updated to do their own alignment and then
+ // this use of lcm() should be removed.
+ if (UseLargePages && !UseParallelGC) {
+ // In the presence of large pages we have to make sure that our
+ // alignment is large-page aware.
+ alignment = lcm(os::large_page_size(), alignment);
+ }
+
+ return alignment;
+}
// GenCollectorPolicy methods.
@@ -175,29 +210,6 @@
GCTimeRatio);
}
-size_t GenCollectorPolicy::compute_max_alignment() {
- // The card marking array and the offset arrays for old generations are
- // committed in os pages as well. Make sure they are entirely full (to
- // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
- // byte entry and the os page size is 4096, the maximum heap size should
- // be 512*4096 = 2MB aligned.
- size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-
- // Parallel GC does its own alignment of the generations to avoid requiring a
- // large page (256M on some platforms) for the permanent generation. The
- // other collectors should also be updated to do their own alignment and then
- // this use of lcm() should be removed.
- if (UseLargePages && !UseParallelGC) {
- // in presence of large pages we have to make sure that our
- // alignment is large page aware
- alignment = lcm(os::large_page_size(), alignment);
- }
-
- assert(alignment >= min_alignment(), "Must be");
-
- return alignment;
-}
-
void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity.
set_min_alignment((uintx) Generation::GenGrain);
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -98,6 +98,9 @@
{}
public:
+ // Return maximum heap alignment that may be imposed by the policy
+ static size_t compute_max_alignment();
+
void set_min_alignment(size_t align) { _min_alignment = align; }
size_t min_alignment() { return _min_alignment; }
void set_max_alignment(size_t align) { _max_alignment = align; }
@@ -234,9 +237,6 @@
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
- // compute max heap alignment
- size_t compute_max_alignment();
-
// Scale the base_size by NewRatio according to
// result = base_size / (NewRatio + 1)
// and align by min_alignment()
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,11 @@
return gen_policy()->size_policy();
}
+ // Return the (conservative) maximum heap alignment
+ static size_t conservative_max_heap_alignment() {
+ return Generation::GenGrain;
+ }
+
size_t capacity() const;
size_t used() const;
--- a/hotspot/src/share/vm/memory/metablock.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metablock.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -50,13 +50,6 @@
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
size_t Metablock::_min_block_byte_size = sizeof(Metablock);
-#ifdef ASSERT
-size_t Metablock::_overhead =
- Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
-size_t Metablock::_overhead = 0;
-#endif
-
// New blocks returned by the Metaspace are zero initialized.
// We should fix the constructors to not assume this instead.
Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
--- a/hotspot/src/share/vm/memory/metablock.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metablock.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -48,7 +48,6 @@
} _header;
} _block;
static size_t _min_block_byte_size;
- static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
@@ -73,7 +72,6 @@
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
- static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
--- a/hotspot/src/share/vm/memory/metaspace.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -51,7 +51,7 @@
// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
-size_t const allocation_from_dictionary_limit = 64 * K;
+size_t const allocation_from_dictionary_limit = 4 * K;
MetaWord* last_allocated = 0;
@@ -177,8 +177,8 @@
void return_chunks(ChunkIndex index, Metachunk* chunks);
// Total of the space in the free chunks list
- size_t free_chunks_total();
- size_t free_chunks_total_in_bytes();
+ size_t free_chunks_total_words();
+ size_t free_chunks_total_bytes();
// Number of chunks in the free chunks list
size_t free_chunks_count();
@@ -228,6 +228,10 @@
BlockTreeDictionary* _dictionary;
static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
+ // Only allocate and split from freelist if the size of the allocation
+ // is at least 1/4th the size of the available block.
+ const static int WasteMultiplier = 4;
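+ // For example, a request for 100 words may be satisfied from a free block of up to
+ // 400 words; a larger block is returned to the dictionary untouched.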
+
// Accessors
BlockTreeDictionary* dictionary() const { return _dictionary; }
@@ -287,6 +291,10 @@
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+ size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
+ size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; }
+ size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
+
// address of next available space in _virtual_space;
// Accessors
VirtualSpaceNode* next() { return _next; }
@@ -323,12 +331,10 @@
// Allocate a chunk from the virtual space and return it.
Metachunk* get_chunk_vs(size_t chunk_word_size);
- Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
// Expands/shrinks the committed space in a virtual space. Delegates
// to Virtualspace
bool expand_by(size_t words, bool pre_touch = false);
- bool shrink_by(size_t words);
// In preparation for deleting this node, remove all the chunks
// in the node from any freelist.
@@ -336,8 +342,6 @@
#ifdef ASSERT
// Debug support
- static void verify_virtual_space_total();
- static void verify_virtual_space_count();
void mangle();
#endif
@@ -423,10 +427,13 @@
// Can this virtual list allocate >1 spaces? Also, used to determine
// whether to allocate unlimited small chunks in this virtual space
bool _is_class;
- bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
-
- // Sum of space in all virtual spaces and number of virtual spaces
- size_t _virtual_space_total;
+ bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
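+ // With compressed class pointers the class space is a single reserved region,
+ // so the class virtual space list must never grow beyond its first node.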
+
+ // Sum of reserved and committed memory in the virtual spaces
+ size_t _reserved_words;
+ size_t _committed_words;
+
+ // Number of virtual spaces
size_t _virtual_space_count;
~VirtualSpaceList();
@@ -440,7 +447,7 @@
_current_virtual_space = v;
}
- void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
+ void link_vs(VirtualSpaceNode* new_entry);
// Get another virtual space and add it to the list. This
// is typically prompted by a failed attempt to allocate a chunk
@@ -457,6 +464,8 @@
size_t grow_chunks_by_words,
size_t medium_chunk_bunch);
+ bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
+
// Get the first chunk for a Metaspace. Used for
// special cases such as the boot class loader, reflection
// class loader and anonymous class loader.
@@ -472,10 +481,15 @@
// Allocate the first virtualspace.
void initialize(size_t word_size);
- size_t virtual_space_total() { return _virtual_space_total; }
-
- void inc_virtual_space_total(size_t v);
- void dec_virtual_space_total(size_t v);
+ size_t reserved_words() { return _reserved_words; }
+ size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
+ size_t committed_words() { return _committed_words; }
+ size_t committed_bytes() { return committed_words() * BytesPerWord; }
+
+ void inc_reserved_words(size_t v);
+ void dec_reserved_words(size_t v);
+ void inc_committed_words(size_t v);
+ void dec_committed_words(size_t v);
void inc_virtual_space_count();
void dec_virtual_space_count();
@@ -623,6 +637,7 @@
// Add chunk to the list of chunks in use
void add_chunk(Metachunk* v, bool make_current);
+ void retire_current_chunk();
Mutex* lock() const { return _lock; }
@@ -722,9 +737,7 @@
// MinChunkSize is a placeholder for the real minimum size JJJ
size_t byte_size = word_size * BytesPerWord;
- size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
- size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+ size_t raw_bytes_size = MAX2(byte_size,
Metablock::min_block_byte_size());
raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
size_t raw_word_size = raw_bytes_size / BytesPerWord;
@@ -807,12 +820,25 @@
}
Metablock* free_block =
- dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
+ dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
if (free_block == NULL) {
return NULL;
}
- return (MetaWord*) free_block;
+ const size_t block_size = free_block->size();
+ if (block_size > WasteMultiplier * word_size) {
+ return_block((MetaWord*)free_block, block_size);
+ return NULL;
+ }
+
+ MetaWord* new_block = (MetaWord*)free_block;
+ assert(block_size >= word_size, "Incorrect size of block from freelist");
+ const size_t unused = block_size - word_size;
+ if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+ return_block(new_block + word_size, unused);
+ }
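+ // A remainder smaller than the minimum tree chunk size cannot be tracked by the
+ // dictionary and is deliberately left unused.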
+
+ return new_block;
}
void BlockFreelist::print_on(outputStream* st) const {
@@ -855,9 +881,9 @@
if (!is_available(chunk_word_size)) {
if (TraceMetadataChunkAllocation) {
- tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
+ gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
// Dump some information about the virtual space that is nearly full
- print_on(tty);
+ print_on(gclog_or_tty);
}
return NULL;
}
@@ -878,20 +904,11 @@
if (TraceMetavirtualspaceAllocation && !result) {
gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
"for byte size " SIZE_FORMAT, bytes);
- virtual_space()->print();
+ virtual_space()->print_on(gclog_or_tty);
}
return result;
}
-// Shrink the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::shrink_by(size_t words) {
- size_t bytes = words * BytesPerWord;
- virtual_space()->shrink_by(bytes);
- return true;
-}
-
-// Add another chunk to the chunk list.
-
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
assert_lock_strong(SpaceManager::expand_lock());
Metachunk* result = take_from_committed(chunk_word_size);
@@ -901,23 +918,6 @@
return result;
}
-Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
- assert_lock_strong(SpaceManager::expand_lock());
-
- Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
-
- if (new_chunk == NULL) {
- // Only a small part of the virtualspace is committed when first
- // allocated so committing more here can be expected.
- size_t page_size_words = os::vm_page_size() / BytesPerWord;
- size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
- page_size_words);
- expand_by(aligned_expand_vs_by_words, false);
- new_chunk = get_chunk_vs(chunk_word_size);
- }
- return new_chunk;
-}
-
bool VirtualSpaceNode::initialize() {
if (!_rs.is_reserved()) {
@@ -977,13 +977,22 @@
}
}
-void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+void VirtualSpaceList::inc_reserved_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock());
- _virtual_space_total = _virtual_space_total + v;
+ _reserved_words = _reserved_words + v;
+}
+void VirtualSpaceList::dec_reserved_words(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _reserved_words = _reserved_words - v;
}
-void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+
+void VirtualSpaceList::inc_committed_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock());
- _virtual_space_total = _virtual_space_total - v;
+ _committed_words = _committed_words + v;
+}
+void VirtualSpaceList::dec_committed_words(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _committed_words = _committed_words - v;
}
void VirtualSpaceList::inc_virtual_space_count() {
@@ -1034,7 +1043,8 @@
}
vsl->purge(chunk_manager());
- dec_virtual_space_total(vsl->reserved()->word_size());
+ dec_reserved_words(vsl->reserved_words());
+ dec_committed_words(vsl->committed_words());
dec_virtual_space_count();
purged_vsl = vsl;
delete vsl;
@@ -1062,12 +1072,12 @@
// Sum used region [bottom, top) in each virtualspace
allocated_by_vs += vsl->used_words_in_vs();
}
- assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
+ assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
err_msg("Total in free chunks " SIZE_FORMAT
" greater than total from virtual_spaces " SIZE_FORMAT,
- allocated_by_vs, chunk_manager()->free_chunks_total()));
+ allocated_by_vs, chunk_manager()->free_chunks_total_words()));
size_t used =
- allocated_by_vs - chunk_manager()->free_chunks_total();
+ allocated_by_vs - chunk_manager()->free_chunks_total_words();
return used;
}
@@ -1088,7 +1098,8 @@
_is_class(false),
_virtual_space_list(NULL),
_current_virtual_space(NULL),
- _virtual_space_total(0),
+ _reserved_words(0),
+ _committed_words(0),
_virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
@@ -1105,7 +1116,8 @@
_is_class(true),
_virtual_space_list(NULL),
_current_virtual_space(NULL),
- _virtual_space_total(0),
+ _reserved_words(0),
+ _committed_words(0),
_virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
@@ -1115,7 +1127,7 @@
_chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
_chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
assert(succeeded, " VirtualSpaceList initialization should not fail");
- link_vs(class_entry, rs.size()/BytesPerWord);
+ link_vs(class_entry);
}
size_t VirtualSpaceList::free_bytes() {
@@ -1138,31 +1150,47 @@
delete new_entry;
return false;
} else {
+ assert(new_entry->reserved_words() == vs_word_size, "Must be");
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
- link_vs(new_entry, vs_word_size);
+ link_vs(new_entry);
return true;
}
}
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
+void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
if (virtual_space_list() == NULL) {
set_virtual_space_list(new_entry);
} else {
current_virtual_space()->set_next(new_entry);
}
set_current_virtual_space(new_entry);
- inc_virtual_space_total(vs_word_size);
+ inc_reserved_words(new_entry->reserved_words());
+ inc_committed_words(new_entry->committed_words());
inc_virtual_space_count();
#ifdef ASSERT
new_entry->mangle();
#endif
if (TraceMetavirtualspaceAllocation && Verbose) {
VirtualSpaceNode* vsl = current_virtual_space();
- vsl->print_on(tty);
+ vsl->print_on(gclog_or_tty);
}
}
+bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+ size_t before = node->committed_words();
+
+ bool result = node->expand_by(word_size, pre_touch);
+
+ size_t after = node->committed_words();
+
+ // after and before can be the same if the memory was pre-committed.
+ assert(after >= before, "Must be");
+ inc_committed_words(after - before);
+
+ return result;
+}
+
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words,
size_t medium_chunk_bunch) {
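Note: VirtualSpaceList::expand_by() above wraps node expansion so the list-level
committed counter records exactly the delta committed by the node, while
link_vs() seeds both counters when a node joins the list. A minimal standalone
sketch of that accounting pattern (illustrative names only, not the HotSpot
classes):

    #include <cassert>
    #include <cstddef>

    struct Node {
      size_t reserved_words;
      size_t committed_words;
      bool commit(size_t word_size) {
        if (committed_words + word_size > reserved_words) return false;
        committed_words += word_size;   // pretend the OS commit succeeded
        return true;
      }
    };

    class NodeList {
      size_t _reserved_words;
      size_t _committed_words;
     public:
      NodeList() : _reserved_words(0), _committed_words(0) {}
      void link(Node* n) {              // analogous to link_vs(new_entry)
        _reserved_words  += n->reserved_words;
        _committed_words += n->committed_words;
      }
      bool expand_by(Node* n, size_t word_size) {
        size_t before = n->committed_words;
        bool ok = n->commit(word_size);
        size_t after = n->committed_words;   // before == after if pre-committed
        assert(after >= before);
        _committed_words += after - before;  // record the delta at list level
        return ok;
      }
      size_t reserved_words()  const { return _reserved_words; }
      size_t committed_words() const { return _committed_words; }
    };

    int main() {
      Node n = { 1024, 0 };
      NodeList list;
      list.link(&n);
      list.expand_by(&n, 128);
      assert(list.committed_words() == 128 && list.reserved_words() == 1024);
    }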
@@ -1186,7 +1214,7 @@
size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
page_size_words);
bool vs_expanded =
- current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
+ expand_by(current_virtual_space(), aligned_expand_vs_by_words);
if (!vs_expanded) {
// Should the capacity of the metaspaces be expanded for
// this allocation? If it's the virtual space for classes and is
@@ -1197,7 +1225,14 @@
MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it.
- next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
+ assert(current_virtual_space()->expanded_words() == 0,
+ "New virtuals space nodes should not have expanded");
+
+ size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
+ page_size_words);
+ // We probably want to expand by aligned_expand_vs_by_words here.
+ expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
+ next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
}
} else {
// Allocation will fail and induce a GC
@@ -1307,7 +1342,7 @@
// reserved space, because this is a larger space prereserved for compressed
// class pointers.
if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
- size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
+ size_t real_allocated = Metaspace::space_list()->reserved_words() +
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
if (real_allocated >= MaxMetaspaceSize) {
return false;
@@ -1508,7 +1543,7 @@
sm->sum_count_in_chunks_in_use());
dummy_chunk->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" Free chunks total %d count %d",
- vsl->chunk_manager()->free_chunks_total(),
+ vsl->chunk_manager()->free_chunks_total_words(),
vsl->chunk_manager()->free_chunks_count());
}
}
@@ -1565,12 +1600,12 @@
// ChunkManager methods
-size_t ChunkManager::free_chunks_total() {
+size_t ChunkManager::free_chunks_total_words() {
return _free_chunks_total;
}
-size_t ChunkManager::free_chunks_total_in_bytes() {
- return free_chunks_total() * BytesPerWord;
+size_t ChunkManager::free_chunks_total_bytes() {
+ return free_chunks_total_words() * BytesPerWord;
}
size_t ChunkManager::free_chunks_count() {
@@ -1698,9 +1733,9 @@
assert_lock_strong(SpaceManager::expand_lock());
slow_locked_verify();
if (TraceMetadataChunkAllocation) {
- tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
- PTR_FORMAT " size " SIZE_FORMAT,
- chunk, chunk->word_size());
+ gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
+ PTR_FORMAT " size " SIZE_FORMAT,
+ chunk, chunk->word_size());
}
free_chunks_put(chunk);
}
@@ -1729,9 +1764,9 @@
dec_free_chunks_total(chunk->capacity_word_size());
if (TraceMetadataChunkAllocation && Verbose) {
- tty->print_cr("ChunkManager::free_chunks_get: free_list "
- PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
- free_list, chunk, chunk->word_size());
+ gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
+ PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+ free_list, chunk, chunk->word_size());
}
} else {
chunk = humongous_dictionary()->get_chunk(
@@ -1741,10 +1776,10 @@
if (chunk != NULL) {
if (TraceMetadataHumongousAllocation) {
size_t waste = chunk->word_size() - word_size;
- tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
- " for requested size " SIZE_FORMAT
- " waste " SIZE_FORMAT,
- chunk->word_size(), word_size, waste);
+ gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+ SIZE_FORMAT " for requested size " SIZE_FORMAT
+ " waste " SIZE_FORMAT,
+ chunk->word_size(), word_size, waste);
}
// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
@@ -1786,10 +1821,10 @@
} else {
list_count = humongous_dictionary()->total_count();
}
- tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
- PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
- this, chunk, chunk->word_size(), list_count);
- locked_print_free_chunks(tty);
+ gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
+ PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+ this, chunk, chunk->word_size(), list_count);
+ locked_print_free_chunks(gclog_or_tty);
}
return chunk;
@@ -2278,6 +2313,7 @@
ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
if (index != HumongousIndex) {
+ retire_current_chunk();
set_current_chunk(new_chunk);
new_chunk->set_next(chunks_in_use(index));
set_chunks_in_use(index, new_chunk);
@@ -2308,7 +2344,17 @@
sum_count_in_chunks_in_use());
new_chunk->print_on(gclog_or_tty);
if (vs_list() != NULL) {
- vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+ vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+ }
+ }
+}
+
+void SpaceManager::retire_current_chunk() {
+ if (current_chunk() != NULL) {
+ size_t remaining_words = current_chunk()->free_word_size();
+ if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+ block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
+ inc_used_metrics(remaining_words);
}
}
}
@@ -2320,10 +2366,10 @@
grow_chunks_by_words,
medium_chunk_bunch());
- if (TraceMetadataHumongousAllocation &&
+ if (TraceMetadataHumongousAllocation && next != NULL &&
SpaceManager::is_humongous(next->word_size())) {
- gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
- next->word_size());
+ gclog_or_tty->print_cr(" new humongous chunk word size "
+ PTR_FORMAT, next->word_size());
}
return next;
@@ -2441,9 +2487,6 @@
curr = curr->next()) {
out->print("%d) ", i++);
curr->print_on(out);
- if (TraceMetadataChunkAllocation && Verbose) {
- block_freelists()->print_on(out);
- }
curr_total += curr->word_size();
used += curr->used_word_size();
capacity += curr->capacity_word_size();
@@ -2451,6 +2494,10 @@
}
}
+ if (TraceMetadataChunkAllocation && Verbose) {
+ block_freelists()->print_on(out);
+ }
+
size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
// Free space isn't wasted.
waste -= free;
@@ -2538,13 +2585,13 @@
return used * BytesPerWord;
}
-size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
size_t free = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
Metaspace* msp = iter.get_next();
if (msp != NULL) {
- free += msp->free_words(mdtype);
+ free += msp->free_words_slow(mdtype);
}
}
return free * BytesPerWord;
@@ -2567,34 +2614,56 @@
return capacity * BytesPerWord;
}
-size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
- VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
- return list == NULL ? 0 : list->virtual_space_total();
+size_t MetaspaceAux::capacity_bytes_slow() {
+#ifdef PRODUCT
+ // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+ guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+ size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+ size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+ assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+ err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+ " class_capacity + non_class_capacity " SIZE_FORMAT
+ " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+ allocated_capacity_bytes(), class_capacity + non_class_capacity,
+ class_capacity, non_class_capacity));
+
+ return class_capacity + non_class_capacity;
}
-size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
-
-size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+ VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+ return list == NULL ? 0 : list->reserved_bytes();
+}
+
+size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
+ VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+ return list == NULL ? 0 : list->committed_bytes();
+}
+
+size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
if (list == NULL) {
return 0;
}
ChunkManager* chunk = list->chunk_manager();
chunk->slow_verify();
- return chunk->free_chunks_total();
+ return chunk->free_chunks_total_words();
}
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
- return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+ return free_chunks_total_words(mdtype) * BytesPerWord;
}
-size_t MetaspaceAux::free_chunks_total() {
- return free_chunks_total(Metaspace::ClassType) +
- free_chunks_total(Metaspace::NonClassType);
+size_t MetaspaceAux::free_chunks_total_words() {
+ return free_chunks_total_words(Metaspace::ClassType) +
+ free_chunks_total_words(Metaspace::NonClassType);
}
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
- return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+ return free_chunks_total_words() * BytesPerWord;
}
void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
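Note: capacity_bytes_slow() above is the debug-only cross-check for the O(1)
running counters: the slow path walks every metaspace and asserts its sum
against the fast totals. A standalone sketch of that pattern (names invented
for illustration):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    static std::vector<size_t> g_space_capacity_words;  // stand-in for the CLD graph
    static size_t g_allocated_capacity_words = 0;       // fast running counter

    void allocate_space(size_t words) {
      g_space_capacity_words.push_back(words);
      g_allocated_capacity_words += words;              // keep fast counter in sync
    }

    size_t capacity_words_slow() {
      size_t sum = 0;
      for (size_t i = 0; i < g_space_capacity_words.size(); i++) {
        sum += g_space_capacity_words[i];               // slow: walks every space
      }
      assert(sum == g_allocated_capacity_words);        // bad-accounting check
      return sum;
    }

    int main() {
      allocate_space(64);
      allocate_space(256);
      return (int)(capacity_words_slow() != 320);
    }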
@@ -2605,14 +2674,14 @@
"(" SIZE_FORMAT ")",
prev_metadata_used,
allocated_used_bytes(),
- reserved_in_bytes());
+ reserved_bytes());
} else {
gclog_or_tty->print(" " SIZE_FORMAT "K"
"->" SIZE_FORMAT "K"
"(" SIZE_FORMAT "K)",
- prev_metadata_used / K,
- allocated_used_bytes() / K,
- reserved_in_bytes()/ K);
+ prev_metadata_used/K,
+ allocated_used_bytes()/K,
+ reserved_bytes()/K);
}
gclog_or_tty->print("]");
@@ -2625,14 +2694,14 @@
out->print_cr(" Metaspace total "
SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
" reserved " SIZE_FORMAT "K",
- allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+ allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
out->print_cr(" data space "
SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
" reserved " SIZE_FORMAT "K",
allocated_capacity_bytes(nct)/K,
allocated_used_bytes(nct)/K,
- reserved_in_bytes(nct)/K);
+ reserved_bytes(nct)/K);
if (Metaspace::using_class_space()) {
Metaspace::MetadataType ct = Metaspace::ClassType;
out->print_cr(" class space "
@@ -2640,17 +2709,17 @@
" reserved " SIZE_FORMAT "K",
allocated_capacity_bytes(ct)/K,
allocated_used_bytes(ct)/K,
- reserved_in_bytes(ct)/K);
+ reserved_bytes(ct)/K);
}
}
// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
- size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+ size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
size_t capacity_bytes = capacity_bytes_slow(mdtype);
size_t used_bytes = used_bytes_slow(mdtype);
- size_t free_bytes = free_in_bytes(mdtype);
+ size_t free_bytes = free_bytes_slow(mdtype);
size_t used_and_free = used_bytes + free_bytes +
free_chunks_capacity_bytes;
out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2836,7 +2905,7 @@
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
- assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
address lower_base = MIN2((address)metaspace_base, cds_base);
address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + class_metaspace_size()));
@@ -2846,7 +2915,7 @@
// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
assert(using_class_space(), "called improperly");
- assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
"Metaspace size is too big");
@@ -2869,9 +2938,9 @@
// If no successful allocation then try to allocate the space anywhere. If
// that fails then OOM doom. At this point we cannot try allocating the
- // metaspace as if UseCompressedKlassPointers is off because too much
- // initialization has happened that depends on UseCompressedKlassPointers.
- // So, UseCompressedKlassPointers cannot be turned off at this point.
+ // metaspace as if UseCompressedClassPointers is off because too much
+ // initialization has happened that depends on UseCompressedClassPointers.
+ // So, UseCompressedClassPointers cannot be turned off at this point.
if (!metaspace_rs.is_reserved()) {
metaspace_rs = ReservedSpace(class_metaspace_size(),
os::vm_allocation_granularity(), false);
@@ -2904,12 +2973,12 @@
}
}
-// For UseCompressedKlassPointers the class space is reserved above the top of
+// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
// The reserved space size may be bigger because of alignment, esp with UseLargePages
- assert(rs.size() >= ClassMetaspaceSize,
- err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+ assert(rs.size() >= CompressedClassSpaceSize,
+ err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
}
@@ -2921,7 +2990,7 @@
int max_alignment = os::vm_page_size();
size_t cds_total = 0;
- set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+ set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
os::vm_allocation_granularity()));
MetaspaceShared::set_max_alignment(max_alignment);
@@ -2941,8 +3010,8 @@
#ifdef _LP64
// Set the compressed klass pointer base so that decoding of these pointers works
// properly when creating the shared archive.
- assert(UseCompressedOops && UseCompressedKlassPointers,
- "UseCompressedOops and UseCompressedKlassPointers must be set");
+ assert(UseCompressedOops && UseCompressedClassPointers,
+ "UseCompressedOops and UseCompressedClassPointers must be set");
Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
if (TraceMetavirtualspaceAllocation && Verbose) {
gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
@@ -2979,7 +3048,7 @@
}
#ifdef _LP64
- // If UseCompressedKlassPointers is set then allocate the metaspace area
+ // If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
if (using_class_space()) {
if (UseSharedSpaces) {
@@ -2997,7 +3066,7 @@
// on the medium chunk list. The next chunk will be small and progress
// from there. This size calculated by -version.
_first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
- (ClassMetaspaceSize/BytesPerWord)*2);
+ (CompressedClassSpaceSize/BytesPerWord)*2);
_first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
// Arbitrarily set the initial virtual space to a multiple
// of the boot class loader size.
@@ -3064,7 +3133,7 @@
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
- // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+ // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
if (mdtype == ClassType && using_class_space()) {
return class_vsm()->allocate(word_size);
} else {
@@ -3103,7 +3172,7 @@
}
}
-size_t Metaspace::free_words(MetadataType mdtype) const {
+size_t Metaspace::free_words_slow(MetadataType mdtype) const {
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
} else {
@@ -3213,7 +3282,7 @@
MetaspaceAux::dump(gclog_or_tty);
}
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
- const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
+ const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
"Metadata space";
report_java_out_of_memory(space_string);
@@ -3311,3 +3380,59 @@
class_vsm()->dump(out);
}
}
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class MetaspaceAuxTest : AllStatic {
+ public:
+ static void test_reserved() {
+ size_t reserved = MetaspaceAux::reserved_bytes();
+
+ assert(reserved > 0, "assert");
+
+ size_t committed = MetaspaceAux::committed_bytes();
+ assert(committed <= reserved, "assert");
+
+ size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+ assert(reserved_metadata > 0, "assert");
+ assert(reserved_metadata <= reserved, "assert");
+
+ if (UseCompressedClassPointers) {
+ size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+ assert(reserved_class > 0, "assert");
+ assert(reserved_class < reserved, "assert");
+ }
+ }
+
+ static void test_committed() {
+ size_t committed = MetaspaceAux::committed_bytes();
+
+ assert(committed > 0, "assert");
+
+ size_t reserved = MetaspaceAux::reserved_bytes();
+ assert(committed <= reserved, "assert");
+
+ size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+ assert(committed_metadata > 0, "assert");
+ assert(committed_metadata <= committed, "assert");
+
+ if (UseCompressedClassPointers) {
+ size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ assert(committed_class > 0, "assert");
+ assert(committed_class < committed, "assert");
+ }
+ }
+
+ static void test() {
+ test_reserved();
+ test_committed();
+ }
+};
+
+void MetaspaceAux_test() {
+ MetaspaceAuxTest::test();
+}
+
+#endif
--- a/hotspot/src/share/vm/memory/metaspace.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -182,9 +182,8 @@
char* bottom() const;
size_t used_words_slow(MetadataType mdtype) const;
- size_t free_words(MetadataType mdtype) const;
+ size_t free_words_slow(MetadataType mdtype) const;
size_t capacity_words_slow(MetadataType mdtype) const;
- size_t waste_words(MetadataType mdtype) const;
size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;
@@ -213,27 +212,22 @@
void iterate(AllocRecordClosure *closure);
- // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
+ // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
static bool using_class_space() {
- return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+ return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
}
};
class MetaspaceAux : AllStatic {
- static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-
- public:
- // Statistics for class space and data space in metaspace.
+ static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
// These methods iterate over the classloader data graph
// for the given Metaspace type. These are slow.
static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
- static size_t free_in_bytes(Metaspace::MetadataType mdtype);
+ static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
-
- // Iterates over the virtual space list.
- static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
+ static size_t capacity_bytes_slow();
// Running sum of space in all Metachunks that has been
// allocated to a Metaspace. This is used instead of
@@ -263,17 +257,16 @@
}
// Used by MetaspaceCounters
- static size_t free_chunks_total();
- static size_t free_chunks_total_in_bytes();
- static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
+ static size_t free_chunks_total_words();
+ static size_t free_chunks_total_bytes();
+ static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
return _allocated_capacity_words[mdtype];
}
static size_t allocated_capacity_words() {
- return _allocated_capacity_words[Metaspace::NonClassType] +
- (Metaspace::using_class_space() ?
- _allocated_capacity_words[Metaspace::ClassType] : 0);
+ return allocated_capacity_words(Metaspace::NonClassType) +
+ allocated_capacity_words(Metaspace::ClassType);
}
static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -286,9 +279,8 @@
return _allocated_used_words[mdtype];
}
static size_t allocated_used_words() {
- return _allocated_used_words[Metaspace::NonClassType] +
- (Metaspace::using_class_space() ?
- _allocated_used_words[Metaspace::ClassType] : 0);
+ return allocated_used_words(Metaspace::NonClassType) +
+ allocated_used_words(Metaspace::ClassType);
}
static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,31 +292,22 @@
static size_t free_bytes();
static size_t free_bytes(Metaspace::MetadataType mdtype);
- // Total capacity in all Metaspaces
- static size_t capacity_bytes_slow() {
-#ifdef PRODUCT
- // Use allocated_capacity_bytes() in PRODUCT instead of this function.
- guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
- size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
- size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
- assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
- err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
- " class_capacity + non_class_capacity " SIZE_FORMAT
- " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
- allocated_capacity_bytes(), class_capacity + non_class_capacity,
- class_capacity, non_class_capacity));
-
- return class_capacity + non_class_capacity;
+ static size_t reserved_bytes(Metaspace::MetadataType mdtype);
+ static size_t reserved_bytes() {
+ return reserved_bytes(Metaspace::ClassType) +
+ reserved_bytes(Metaspace::NonClassType);
}
- // Total space reserved in all Metaspaces
- static size_t reserved_in_bytes() {
- return reserved_in_bytes(Metaspace::ClassType) +
- reserved_in_bytes(Metaspace::NonClassType);
+ static size_t committed_bytes(Metaspace::MetadataType mdtype);
+ static size_t committed_bytes() {
+ return committed_bytes(Metaspace::ClassType) +
+ committed_bytes(Metaspace::NonClassType);
}
- static size_t min_chunk_size();
+ static size_t min_chunk_size_words();
+ static size_t min_chunk_size_bytes() {
+ return min_chunk_size_words() * BytesPerWord;
+ }
// Print change in used metadata.
static void print_metaspace_change(size_t prev_metadata_used);
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -65,26 +65,25 @@
MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
-size_t MetaspaceCounters::calculate_capacity() {
- // The total capacity is the sum of
- // 1) capacity of Metachunks in use by all Metaspaces
- // 2) unused space at the end of each Metachunk
- // 3) space in the freelist
- size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
- + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
- return total_capacity;
+size_t MetaspaceCounters::used() {
+ return MetaspaceAux::allocated_used_bytes();
+}
+
+size_t MetaspaceCounters::capacity() {
+ return MetaspaceAux::committed_bytes();
+}
+
+size_t MetaspaceCounters::max_capacity() {
+ return MetaspaceAux::reserved_bytes();
}
void MetaspaceCounters::initialize_performance_counters() {
if (UsePerfData) {
assert(_perf_counters == NULL, "Should only be initialized once");
- size_t min_capacity = MetaspaceAux::min_chunk_size();
- size_t capacity = calculate_capacity();
- size_t max_capacity = MetaspaceAux::reserved_in_bytes();
- size_t used = MetaspaceAux::allocated_used_bytes();
-
- _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
+ size_t min_capacity = 0;
+ _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
+ capacity(), max_capacity(), used());
}
}
@@ -92,31 +91,29 @@
if (UsePerfData) {
assert(_perf_counters != NULL, "Should be initialized");
- size_t capacity = calculate_capacity();
- size_t max_capacity = MetaspaceAux::reserved_in_bytes();
- size_t used = MetaspaceAux::allocated_used_bytes();
-
- _perf_counters->update(capacity, max_capacity, used);
+ _perf_counters->update(capacity(), max_capacity(), used());
}
}
MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
-size_t CompressedClassSpaceCounters::calculate_capacity() {
- return MetaspaceAux::allocated_capacity_bytes(_class_type) +
- MetaspaceAux::free_bytes(_class_type) +
- MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+size_t CompressedClassSpaceCounters::used() {
+ return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::capacity() {
+ return MetaspaceAux::committed_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::max_capacity() {
+ return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
}
void CompressedClassSpaceCounters::update_performance_counters() {
- if (UsePerfData && UseCompressedKlassPointers) {
+ if (UsePerfData && UseCompressedClassPointers) {
assert(_perf_counters != NULL, "Should be initialized");
- size_t capacity = calculate_capacity();
- size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
- size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
- _perf_counters->update(capacity, max_capacity, used);
+ _perf_counters->update(capacity(), max_capacity(), used());
}
}
@@ -125,13 +122,10 @@
assert(_perf_counters == NULL, "Should only be initialized once");
const char* ns = "compressedclassspace";
- if (UseCompressedKlassPointers) {
- size_t min_capacity = MetaspaceAux::min_chunk_size();
- size_t capacity = calculate_capacity();
- size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
- size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
- _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+ if (UseCompressedClassPointers) {
+ size_t min_capacity = 0;
+ _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
+ max_capacity(), used());
} else {
_perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
}
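Note: the counters now surface three independent quantities instead of a
derived sum. A hedged sketch of the relationship the new accessors imply
(hypothetical type, not a HotSpot one; the committed <= reserved ordering
matches the unit-test asserts added to metaspace.cpp above):

    #include <cassert>
    #include <cstddef>

    struct MetaspaceStats {      // hypothetical aggregate for illustration
      size_t used_bytes;         // allocated_used_bytes()
      size_t committed_bytes;    // committed_bytes()  -> "capacity" counter
      size_t reserved_bytes;     // reserved_bytes()   -> "max_capacity" counter
    };

    void publish(const MetaspaceStats& s) {
      assert(s.used_bytes <= s.committed_bytes);
      assert(s.committed_bytes <= s.reserved_bytes);
      // A perf-counter update reports the three values directly instead of
      // deriving capacity from allocated + freelist + unused-chunk space.
    }

    int main() {
      MetaspaceStats s = { 10, 64, 1024 };
      publish(s);
    }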
--- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -25,13 +25,15 @@
#ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
#define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
-#include "memory/metaspace.hpp"
+#include "memory/allocation.hpp"
class MetaspacePerfCounters;
class MetaspaceCounters: public AllStatic {
static MetaspacePerfCounters* _perf_counters;
- static size_t calculate_capacity();
+ static size_t used();
+ static size_t capacity();
+ static size_t max_capacity();
public:
static void initialize_performance_counters();
@@ -40,8 +42,9 @@
class CompressedClassSpaceCounters: public AllStatic {
static MetaspacePerfCounters* _perf_counters;
- static size_t calculate_capacity();
- static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+ static size_t used();
+ static size_t capacity();
+ static size_t max_capacity();
public:
static void initialize_performance_counters();
--- a/hotspot/src/share/vm/memory/universe.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -872,13 +872,16 @@
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+ assert(alignment <= Arguments::conservative_max_heap_alignment(),
+ err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
+ alignment, Arguments::conservative_max_heap_alignment()));
size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
assert(!UseLargePages
- || UseParallelOldGC
+ || UseParallelGC
|| use_large_pages, "Wrong alignment to use large pages");
char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
@@ -1028,7 +1031,7 @@
msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
- msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
+ msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
--- a/hotspot/src/share/vm/memory/universe.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/memory/universe.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -181,7 +181,7 @@
// For UseCompressedOops.
static struct NarrowPtrStruct _narrow_oop;
- // For UseCompressedKlassPointers.
+ // For UseCompressedClassPointers.
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;
@@ -229,7 +229,7 @@
_narrow_oop._base = base;
}
static void set_narrow_klass_base(address base) {
- assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+ assert(UseCompressedClassPointers, "no compressed klass ptrs?");
_narrow_klass._base = base;
}
static void set_narrow_oop_use_implicit_null_checks(bool use) {
@@ -353,7 +353,7 @@
static int narrow_oop_shift() { return _narrow_oop._shift; }
static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
- // For UseCompressedKlassPointers
+ // For UseCompressedClassPointers
static address narrow_klass_base() { return _narrow_klass._base; }
static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); }
static int narrow_klass_shift() { return _narrow_klass._shift; }
--- a/hotspot/src/share/vm/oops/arrayOop.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -65,7 +65,7 @@
// declared nonstatic fields in arrayOopDesc if not compressed, otherwise
// it occupies the second half of the _klass field in oopDesc.
static int length_offset_in_bytes() {
- return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
+ return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
sizeof(arrayOopDesc);
}
--- a/hotspot/src/share/vm/oops/instanceOop.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/oops/instanceOop.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -37,9 +37,9 @@
// If compressed, the offset of the fields of the instance may not be aligned.
static int base_offset_in_bytes() {
- // offset computation code breaks if UseCompressedKlassPointers
+ // offset computation code breaks if UseCompressedClassPointers
// only is true
- return (UseCompressedOops && UseCompressedKlassPointers) ?
+ return (UseCompressedOops && UseCompressedClassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
}
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -69,7 +69,7 @@
}
inline Klass* oopDesc::klass() const {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
return Klass::decode_klass_not_null(_metadata._compressed_klass);
} else {
return _metadata._klass;
@@ -78,7 +78,7 @@
inline Klass* oopDesc::klass_or_null() const volatile {
// can be NULL in CMS
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
return Klass::decode_klass(_metadata._compressed_klass);
} else {
return _metadata._klass;
@@ -86,19 +86,19 @@
}
inline int oopDesc::klass_gap_offset_in_bytes() {
- assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
+ assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
}
inline Klass** oopDesc::klass_addr() {
// Only used internally and with CMS and will not work with
// UseCompressedOops
- assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+ assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
return (Klass**) &_metadata._klass;
}
inline narrowKlass* oopDesc::compressed_klass_addr() {
- assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
+ assert(UseCompressedClassPointers, "only called by compressed klass pointers");
return &_metadata._compressed_klass;
}
@@ -106,7 +106,7 @@
// since klasses are promoted no store check is needed
assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
*compressed_klass_addr() = Klass::encode_klass_not_null(k);
} else {
*klass_addr() = k;
@@ -118,7 +118,7 @@
}
inline void oopDesc::set_klass_gap(int v) {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
*(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
}
}
@@ -126,7 +126,7 @@
inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
_metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling)
} else {
_metadata._klass = (Klass*)(address)k;
@@ -135,7 +135,7 @@
inline oop oopDesc::list_ptr_from_klass() {
// This is only to be used during GC, for from-space objects.
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
return decode_heap_oop((narrowOop)_metadata._compressed_klass);
} else {
// Special case for GC
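Note: all of these accessors branch on the same union in the object header. A
self-contained sketch of that layout with a plausible decode step (the
base/shift values here are invented, not the VM's):

    #include <cstdint>

    typedef uint32_t narrowKlass;
    struct Klass { int dummy; };

    static bool  UseCompressedClassPointers = true;  // stand-in for the VM flag
    static char* g_narrow_klass_base = 0;            // assumed encoding base
    static const int g_narrow_klass_shift = 3;       // assumed encoding shift

    union MetadataField {
      Klass*      _klass;                 // full-width class pointer
      narrowKlass _compressed_klass;      // 32-bit encoded class pointer
    };

    static Klass* decode_klass(narrowKlass nk) {
      return (Klass*)(g_narrow_klass_base +
                      ((uintptr_t)nk << g_narrow_klass_shift));
    }

    Klass* klass_of(const MetadataField& m) {
      return UseCompressedClassPointers ? decode_klass(m._compressed_klass)
                                        : m._klass;
    }

    int main() {
      MetadataField m;
      m._compressed_klass = 0;            // encodes the Klass at the base
      return klass_of(m) == (Klass*)0 ? 0 : 1;
    }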
--- a/hotspot/src/share/vm/opto/cfgnode.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -1932,7 +1932,7 @@
#ifdef _LP64
// Push DecodeN/DecodeNKlass down through phi.
// The rest of phi graph will transform by split EncodeP node though phis up.
- if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
+ if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
bool may_push = true;
bool has_decodeN = false;
bool is_decodeN = false;
--- a/hotspot/src/share/vm/opto/compile.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/compile.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2646,7 +2646,7 @@
addp->in(AddPNode::Base) == n->in(AddPNode::Base),
"Base pointers must match" );
#ifdef _LP64
- if ((UseCompressedOops || UseCompressedKlassPointers) &&
+ if ((UseCompressedOops || UseCompressedClassPointers) &&
addp->Opcode() == Op_ConP &&
addp == n->in(AddPNode::Base) &&
n->in(AddPNode::Offset)->is_Con()) {
@@ -3033,7 +3033,7 @@
// Skip next transformation if compressed oops are not used.
if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
- (!UseCompressedOops && !UseCompressedKlassPointers))
+ (!UseCompressedOops && !UseCompressedClassPointers))
return;
// Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
--- a/hotspot/src/share/vm/opto/connode.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/connode.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -630,7 +630,7 @@
if (t == Type::TOP) return Type::TOP;
assert (t != TypePtr::NULL_PTR, "null klass?");
- assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+ assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
return t->make_narrowklass();
}
--- a/hotspot/src/share/vm/opto/library_call.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/library_call.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -4204,7 +4204,7 @@
// 12 - 64-bit VM, compressed klass
// 16 - 64-bit VM, normal klass
if (base_off % BytesPerLong != 0) {
- assert(UseCompressedKlassPointers, "");
+ assert(UseCompressedClassPointers, "");
if (is_array) {
// Exclude length to copy by 8 bytes words.
base_off += sizeof(int);
--- a/hotspot/src/share/vm/opto/live.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/live.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -321,7 +321,7 @@
#ifdef _LP64
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
- UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+ UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
#endif
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
--- a/hotspot/src/share/vm/opto/macro.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/macro.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2191,7 +2191,7 @@
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
#ifdef _LP64
- if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
+ if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
klass_node->in(1)->init_req(0, ctrl);
} else
--- a/hotspot/src/share/vm/opto/memnode.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/memnode.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2031,7 +2031,7 @@
assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
if (adr_type->is_ptr_to_narrowklass()) {
- assert(UseCompressedKlassPointers, "no compressed klasses");
+ assert(UseCompressedClassPointers, "no compressed klasses");
Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
}
@@ -2369,7 +2369,7 @@
val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
} else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
- (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+ (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
adr->bottom_type()->isa_rawptr())) {
val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
--- a/hotspot/src/share/vm/opto/type.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/opto/type.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -2416,7 +2416,7 @@
#ifdef _LP64
if (_offset != 0) {
if (_offset == oopDesc::klass_offset_in_bytes()) {
- _is_ptr_to_narrowklass = UseCompressedKlassPointers;
+ _is_ptr_to_narrowklass = UseCompressedClassPointers;
} else if (klass() == NULL) {
// Array with unknown body type
assert(this->isa_aryptr(), "only arrays without klass");
--- a/hotspot/src/share/vm/prims/jni.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/prims/jni.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -5049,12 +5049,16 @@
// Forward declaration
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
+void TestVirtualSpace_test();
+void MetaspaceAux_test();
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
run_unit_test(TestReservedSpace_test());
run_unit_test(TestReserveMemorySpecial_test());
+ run_unit_test(TestVirtualSpace_test());
+ run_unit_test(MetaspaceAux_test());
run_unit_test(GlobalDefinitions::test_globals());
run_unit_test(GCTimerAllTest::all());
run_unit_test(arrayOopDesc::test_max_array_length());
--- a/hotspot/src/share/vm/prims/whitebox.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/prims/whitebox.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -33,6 +33,7 @@
#include "prims/whitebox.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
@@ -94,6 +95,11 @@
return closure.found();
WB_END
+WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
+ return (jlong)Arguments::max_heap_for_compressed_oops();
+}
+WB_END
+
WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
CollectorPolicy * p = Universe::heap()->collector_policy();
gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
@@ -436,6 +442,8 @@
CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
(void*) &WB_ParseCommandLine
},
+ {CC"getCompressedOopsMaxHeapSize", CC"()J",
+ (void*)&WB_GetCompressedOopsMaxHeapSize},
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
--- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -28,6 +28,7 @@
#include "compiler/compilerOracle.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -54,6 +55,8 @@
#endif
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
// Note: This is a special bug reporting site for the JVM
@@ -90,6 +93,7 @@
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
+size_t Arguments::_conservative_max_heap_alignment = 0;
uintx Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
@@ -1391,10 +1395,17 @@
return true;
}
-inline uintx max_heap_for_compressed_oops() {
+uintx Arguments::max_heap_for_compressed_oops() {
// Avoid sign flip.
assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
- LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
+ // We need to fit both the NULL page and the heap into the memory budget, while
+ // keeping alignment constraints of the heap. To guarantee the latter, as the
+ // NULL page is located before the heap, we pad the NULL page to the conservative
+ // maximum alignment that the GC may ever impose upon the heap.
+ size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
+ Arguments::conservative_max_heap_alignment());
+
+ LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
NOT_LP64(ShouldNotReachHere(); return 0);
}
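Note: worked through with concrete numbers (assumptions flagged in the
comments), the computation above is simply the encoding budget minus a padded
NULL page:

    #include <cstddef>
    #include <cstdint>

    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);  // power-of-two alignment
    }

    // Sketch of the calculation; the real constants come from the VM.
    size_t max_heap_for_compressed_oops(uint64_t oop_encoding_heap_max,
                                        size_t vm_page_size,
                                        size_t conservative_max_heap_alignment) {
      size_t displacement_due_to_null_page =
          align_up(vm_page_size, conservative_max_heap_alignment);
      return (size_t)(oop_encoding_heap_max - displacement_due_to_null_page);
    }

    int main() {
      // Assumed example (64-bit build): a 32 GB encoding budget, 4 KB pages
      // and a 32 MB maximum alignment give 32 GB - 32 MB of usable heap.
      uint64_t budget = 32ull * 1024 * 1024 * 1024;
      size_t heap = max_heap_for_compressed_oops(budget, 4 * 1024,
                                                 32 * 1024 * 1024);
      return heap == budget - 32ull * 1024 * 1024 ? 0 : 1;
    }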
@@ -1439,7 +1450,7 @@
if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
warning("Max heap size too large for Compressed Oops");
FLAG_SET_DEFAULT(UseCompressedOops, false);
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
}
}
#endif // _LP64
@@ -1452,22 +1463,22 @@
void Arguments::set_use_compressed_klass_ptrs() {
#ifndef ZERO
#ifdef _LP64
- // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
+ // UseCompressedOops must be on for UseCompressedClassPointers to be on.
if (!UseCompressedOops) {
- if (UseCompressedKlassPointers) {
- warning("UseCompressedKlassPointers requires UseCompressedOops");
+ if (UseCompressedClassPointers) {
+ warning("UseCompressedClassPointers requires UseCompressedOops");
}
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
} else {
- // Turn on UseCompressedKlassPointers too
- if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
- FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
+ // Turn on UseCompressedClassPointers too
+ if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
+ FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
}
- // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
- if (UseCompressedKlassPointers) {
- if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
- warning("Class metaspace size is too large for UseCompressedKlassPointers");
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+ // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
+ if (UseCompressedClassPointers) {
+ if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
+ warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
}
}
}
@@ -1475,6 +1486,23 @@
#endif // !ZERO
}
+void Arguments::set_conservative_max_heap_alignment() {
+ // The conservative maximum required alignment for the heap is the maximum of
+ // the alignments imposed by several sources: any requirements from the heap
+ // itself, the collector policy and the maximum page size we may run the VM
+ // with.
+ size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
+#if INCLUDE_ALL_GCS
+ if (UseParallelGC) {
+ heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
+ } else if (UseG1GC) {
+ heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
+ }
+#endif // INCLUDE_ALL_GCS
+ _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
+ CollectorPolicy::compute_max_alignment());
+}
+
void Arguments::set_ergonomics_flags() {
if (os::is_server_class_machine()) {
@@ -1503,6 +1531,8 @@
}
}
+ set_conservative_max_heap_alignment();
+
#ifndef ZERO
#ifdef _LP64
set_use_compressed_oops();
@@ -2193,8 +2223,8 @@
status = status && verify_object_alignment();
- status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
- "ClassMetaspaceSize");
+ status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
+ "CompressedClassSpaceSize");
status = status && verify_interval(MarkStackSizeMax,
1, (max_jint - 1), "MarkStackSizeMax");
@@ -3326,13 +3356,13 @@
}
UseSharedSpaces = false;
#ifdef _LP64
- if (!UseCompressedOops || !UseCompressedKlassPointers) {
+ if (!UseCompressedOops || !UseCompressedClassPointers) {
vm_exit_during_initialization(
- "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
+ "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
}
} else {
- // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
- if (!UseCompressedOops || !UseCompressedKlassPointers) {
+ // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
+ if (!UseCompressedOops || !UseCompressedClassPointers) {
no_shared_spaces();
}
#endif
@@ -3558,6 +3588,11 @@
no_shared_spaces();
#endif // INCLUDE_CDS
+ return JNI_OK;
+}
+
+jint Arguments::apply_ergo() {
+
// Set flags based on ergonomics.
set_ergonomics_flags();
@@ -3633,7 +3668,7 @@
FLAG_SET_DEFAULT(ProfileInterpreter, false);
FLAG_SET_DEFAULT(UseBiasedLocking, false);
LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
- LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
+ LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
#endif // CC_INTERP
#ifdef COMPILER2
@@ -3662,6 +3697,10 @@
DebugNonSafepoints = true;
}
+ if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
+ warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
+ }
+
#ifndef PRODUCT
if (CompileTheWorld) {
// Force NmethodSweeper to sweep whole CodeCache each time.

--- a/hotspot/src/share/vm/runtime/arguments.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -280,6 +280,9 @@
// Option flags
static bool _has_profile;
static const char* _gc_log_filename;
+ // Value of the conservative maximum heap alignment needed
+ static size_t _conservative_max_heap_alignment;
+
static uintx _min_heap_size;
// -Xrun arguments
@@ -327,6 +330,7 @@
// Garbage-First (UseG1GC)
static void set_g1_gc_flags();
// GC ergonomics
+ static void set_conservative_max_heap_alignment();
static void set_use_compressed_oops();
static void set_use_compressed_klass_ptrs();
static void set_ergonomics_flags();
@@ -430,8 +434,10 @@
static char* SharedArchivePath;
public:
- // Parses the arguments
+ // Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args);
+ // Apply ergonomics
+ static jint apply_ergo();
// Adjusts the arguments after the OS have adjusted the arguments
static jint adjust_after_os();
// Check for consistency in the selection of the garbage collector.
@@ -445,6 +451,10 @@
// Used by os_solaris
static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
+ static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
+ // Return the maximum size a heap with compressed oops can take
+ static size_t max_heap_for_compressed_oops();
+
// return a char* array containing all options
static char** jvm_flags_array() { return _jvm_flags_array; }
static char** jvm_args_array() { return _jvm_args_array; }
--- a/hotspot/src/share/vm/runtime/globals.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -443,8 +443,8 @@
"Use 32-bit object references in 64-bit VM " \
"lp64_product means flag is always constant in 32 bit VM") \
\
- lp64_product(bool, UseCompressedKlassPointers, false, \
- "Use 32-bit klass pointers in 64-bit VM " \
+ lp64_product(bool, UseCompressedClassPointers, false, \
+ "Use 32-bit class pointers in 64-bit VM " \
"lp64_product means flag is always constant in 32 bit VM") \
\
notproduct(bool, CheckCompressedOops, true, \
@@ -3039,9 +3039,9 @@
product(uintx, MaxMetaspaceSize, max_uintx, \
"Maximum size of Metaspaces (in bytes)") \
\
- product(uintx, ClassMetaspaceSize, 1*G, \
- "Maximum size of InstanceKlass area in Metaspace used for " \
- "UseCompressedKlassPointers") \
+ product(uintx, CompressedClassSpaceSize, 1*G, \
+ "Maximum size of class area in Metaspace when compressed " \
+ "class pointers are used") \
\
product(uintx, MinHeapFreeRatio, 40, \
"Min percentage of heap free after GC to avoid expansion") \
--- a/hotspot/src/share/vm/runtime/os.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/os.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -314,6 +314,11 @@
}
}
+void os::init_before_ergo() {
+ // We need to initialize large page support here because ergonomics makes
+ // decisions based on large page support and the calculated large page size.
+ large_page_init();
+}
void os::signal_init() {
if (!ReduceSignalUsage) {
--- a/hotspot/src/share/vm/runtime/os.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/os.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -139,7 +139,10 @@
public:
static void init(void); // Called before command line parsing
+ static void init_before_ergo(void); // Called after command line parsing
+ // before VM ergonomics processing.
static jint init_2(void); // Called after command line parsing
+ // and VM ergonomics processing
static void init_globals(void) { // Called from init_globals() in init.cpp
init_globals_ext();
}
@@ -254,6 +257,11 @@
static size_t page_size_for_region(size_t region_min_size,
size_t region_max_size,
uint min_pages);
+ // Return the largest page size that can be used
+ static size_t max_page_size() {
+ // The _page_sizes array is sorted in descending order.
+ return _page_sizes[0];
+ }
// Methods for tracing page sizes returned by the above method; enabled by
// TracePageSizes. The region_{min,max}_size parameters should be the values
--- a/hotspot/src/share/vm/runtime/thread.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/thread.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -3331,6 +3331,11 @@
jint parse_result = Arguments::parse(args);
if (parse_result != JNI_OK) return parse_result;
+ os::init_before_ergo();
+
+ jint ergo_result = Arguments::apply_ergo();
+ if (ergo_result != JNI_OK) return ergo_result;
+
if (PauseAtStartup) {
os::pause();
}
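Note: the resulting startup order matters: large-page initialization must sit
between flag parsing and ergonomics, because the conservative heap alignment
consults os::max_page_size(). A stub sketch of that sequencing (all function
names here are invented):

    #include <cstdio>

    static int  parse_arguments()  { std::puts("parse flags");          return 0; }
    static void init_before_ergo() { std::puts("init large pages");               }
    static int  apply_ergonomics() { std::puts("apply ergonomic flags"); return 0; }

    int create_vm_prologue() {      // mirrors the Threads::create_vm() prologue
      int parse_result = parse_arguments();
      if (parse_result != 0) return parse_result;
      init_before_ergo();           // page sizes are known from here on
      int ergo_result = apply_ergonomics();
      if (ergo_result != 0) return ergo_result;
      return 0;
    }

    int main() { return create_vm_prologue(); }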
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -453,6 +453,42 @@
return reserved_size() - committed_size();
}
+size_t VirtualSpace::actual_committed_size() const {
+ // Special VirtualSpaces commit all reserved space up front.
+ if (special()) {
+ return reserved_size();
+ }
+
+ size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char));
+ size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char));
+ size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char));
+
+#ifdef ASSERT
+ size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char));
+ size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char));
+ size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char));
+
+ if (committed_high > 0) {
+ assert(committed_low == lower, "Must be");
+ assert(committed_middle == middle, "Must be");
+ }
+
+ if (committed_middle > 0) {
+ assert(committed_low == lower, "Must be");
+ }
+ if (committed_middle < middle) {
+ assert(committed_high == 0, "Must be");
+ }
+
+ if (committed_low < lower) {
+ assert(committed_high == 0, "Must be");
+ assert(committed_middle == 0, "Must be");
+ }
+#endif
+
+ return committed_low + committed_middle + committed_high;
+}
+
bool VirtualSpace::contains(const void* p) const {
return low() <= (const char*) p && (const char*) p < high();
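Note: actual_committed_size() relies on the VirtualSpace being carved into
three regions that commit strictly bottom-up, so the total is the sum of three
high-water marks. A reduced sketch of the sum and its invariant (illustrative
types only):

    #include <cassert>
    #include <cstddef>

    struct Region {
      size_t size;       // full extent of the region
      size_t committed;  // committed prefix (high-water mark), <= size
    };

    // Later regions may only hold committed memory if all earlier regions are
    // fully committed -- the same invariant the ASSERT block above checks.
    size_t actual_committed_size(const Region& lower, const Region& middle,
                                 const Region& upper) {
      if (upper.committed > 0) {
        assert(lower.committed == lower.size &&
               middle.committed == middle.size);
      }
      if (middle.committed > 0) {
        assert(lower.committed == lower.size);
      }
      return lower.committed + middle.committed + upper.committed;
    }

    int main() {
      Region lower = { 16, 16 }, middle = { 64, 32 }, upper = { 16, 0 };
      return actual_committed_size(lower, middle, upper) == 48 ? 0 : 1;
    }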
@@ -718,16 +754,19 @@
assert(high() <= upper_high(), "upper high");
}
-void VirtualSpace::print() {
- tty->print ("Virtual space:");
- if (special()) tty->print(" (pinned in memory)");
- tty->cr();
- tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
- tty->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
- tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
- tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
+void VirtualSpace::print_on(outputStream* out) {
+ out->print ("Virtual space:");
+ if (special()) out->print(" (pinned in memory)");
+ out->cr();
+ out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+ out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
+ out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
+ out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}
+void VirtualSpace::print() {
+ print_on(tty);
+}
/////////////// Unit tests ///////////////
@@ -910,6 +949,109 @@
TestReservedSpace::test_reserved_space();
}
+#define assert_equals(actual, expected) \
+ assert(actual == expected, \
+ err_msg("Got " SIZE_FORMAT " expected " \
+ SIZE_FORMAT, actual, expected));
+
+#define assert_ge(value1, value2) \
+ assert(value1 >= value2, \
+ err_msg("'" #value1 "': " SIZE_FORMAT " '" \
+ #value2 "': " SIZE_FORMAT, value1, value2));
+
+#define assert_lt(value1, value2) \
+ assert(value1 < value2, \
+ err_msg("'" #value1 "': " SIZE_FORMAT " '" \
+ #value2 "': " SIZE_FORMAT, value1, value2));
+
+
+class TestVirtualSpace : AllStatic {
+ public:
+ static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
+ size_t granularity = os::vm_allocation_granularity();
+ size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+
+ ReservedSpace reserved(reserve_size_aligned);
+
+ assert(reserved.is_reserved(), "Must be");
+
+ VirtualSpace vs;
+ bool initialized = vs.initialize(reserved, 0);
+ assert(initialized, "Failed to initialize VirtualSpace");
+
+ vs.expand_by(commit_size, false);
+
+ if (vs.special()) {
+ assert_equals(vs.actual_committed_size(), reserve_size_aligned);
+ } else {
+ assert_ge(vs.actual_committed_size(), commit_size);
+ // Approximate the commit granularity.
+ size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
+ }
+
+ reserved.release();
+ }
+
+ static void test_virtual_space_actual_committed_space_one_large_page() {
+ if (!UseLargePages) {
+ return;
+ }
+
+ size_t large_page_size = os::large_page_size();
+
+ ReservedSpace reserved(large_page_size, large_page_size, true, false);
+
+ assert(reserved.is_reserved(), "Must be");
+
+ VirtualSpace vs;
+ bool initialized = vs.initialize(reserved, 0);
+ assert(initialized, "Failed to initialize VirtualSpace");
+
+ vs.expand_by(large_page_size, false);
+
+ assert_equals(vs.actual_committed_size(), large_page_size);
+
+ reserved.release();
+ }
+
+ static void test_virtual_space_actual_committed_space() {
+ test_virtual_space_actual_committed_space(4 * K, 0);
+ test_virtual_space_actual_committed_space(4 * K, 4 * K);
+ test_virtual_space_actual_committed_space(8 * K, 0);
+ test_virtual_space_actual_committed_space(8 * K, 4 * K);
+ test_virtual_space_actual_committed_space(8 * K, 8 * K);
+ test_virtual_space_actual_committed_space(12 * K, 0);
+ test_virtual_space_actual_committed_space(12 * K, 4 * K);
+ test_virtual_space_actual_committed_space(12 * K, 8 * K);
+ test_virtual_space_actual_committed_space(12 * K, 12 * K);
+ test_virtual_space_actual_committed_space(64 * K, 0);
+ test_virtual_space_actual_committed_space(64 * K, 32 * K);
+ test_virtual_space_actual_committed_space(64 * K, 64 * K);
+ test_virtual_space_actual_committed_space(2 * M, 0);
+ test_virtual_space_actual_committed_space(2 * M, 4 * K);
+ test_virtual_space_actual_committed_space(2 * M, 64 * K);
+ test_virtual_space_actual_committed_space(2 * M, 1 * M);
+ test_virtual_space_actual_committed_space(2 * M, 2 * M);
+ test_virtual_space_actual_committed_space(10 * M, 0);
+ test_virtual_space_actual_committed_space(10 * M, 4 * K);
+ test_virtual_space_actual_committed_space(10 * M, 8 * K);
+ test_virtual_space_actual_committed_space(10 * M, 1 * M);
+ test_virtual_space_actual_committed_space(10 * M, 2 * M);
+ test_virtual_space_actual_committed_space(10 * M, 5 * M);
+ test_virtual_space_actual_committed_space(10 * M, 10 * M);
+ }
+
+ static void test_virtual_space() {
+ test_virtual_space_actual_committed_space();
+ test_virtual_space_actual_committed_space_one_large_page();
+ }
+};
+
+void TestVirtualSpace_test() {
+ TestVirtualSpace::test_virtual_space();
+}
+
#endif // PRODUCT
#endif
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -183,11 +183,16 @@
// Destruction
~VirtualSpace();
- // Testers (all sizes are byte sizes)
- size_t committed_size() const;
- size_t reserved_size() const;
+ // Reserved memory
+ size_t reserved_size() const;
+ // Actually committed OS memory
+ size_t actual_committed_size() const;
+ // Memory used/expanded in this virtual space
+ size_t committed_size() const;
+ // Memory left to use/expand in this virtual space
size_t uncommitted_size() const;
- bool contains(const void* p) const;
+
+ bool contains(const void* p) const;
// Operations
// returns true on success, false otherwise
@@ -198,7 +203,8 @@
void check_for_contiguity() PRODUCT_RETURN;
// Debugging
- void print() PRODUCT_RETURN;
+ void print_on(outputStream* out) PRODUCT_RETURN;
+ void print();
};
#endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
--- a/hotspot/src/share/vm/services/memoryPool.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/services/memoryPool.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -260,10 +260,10 @@
}
MetaspacePool::MetaspacePool() :
- MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { }
+ MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
MemoryUsage MetaspacePool::get_memory_usage() {
- size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+ size_t committed = MetaspaceAux::committed_bytes();
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
@@ -271,26 +271,19 @@
return MetaspaceAux::allocated_used_bytes();
}
-size_t MetaspacePool::capacity_in_bytes() const {
- return MetaspaceAux::allocated_capacity_bytes();
-}
-
size_t MetaspacePool::calculate_max_size() const {
- return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize : max_uintx;
+ return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
+ MemoryUsage::undefined_size();
}
CompressedKlassSpacePool::CompressedKlassSpacePool() :
- MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { }
+ MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
size_t CompressedKlassSpacePool::used_in_bytes() {
return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
}
-size_t CompressedKlassSpacePool::capacity_in_bytes() const {
- return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-}
-
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
- size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+ size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
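With this change the Metaspace and Compressed Class Space pools report memory the OS has actually committed (MetaspaceAux::committed_bytes()) instead of the capacity rounded down to a page boundary, and their initial size is simply 0. On the Java side these pools surface through the standard MemoryPoolMXBean API; a small sketch of how the new values can be observed (the pool names are the ones registered in memoryService.cpp):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;

public class ShowMetaspacePools {
    public static void main(String[] args) {
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            String name = pool.getName();
            if (name.equals("Metaspace") || name.equals("Compressed Class Space")) {
                MemoryUsage mu = pool.getUsage();
                // getMax() is -1 when the max is undefined, e.g. MaxMetaspaceSize not set
                System.out.println(name + ": used=" + mu.getUsed()
                        + " committed=" + mu.getCommitted()
                        + " max=" + mu.getMax());
            }
        }
    }
}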
--- a/hotspot/src/share/vm/services/memoryPool.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/services/memoryPool.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -224,7 +224,6 @@
class MetaspacePool : public MemoryPool {
size_t calculate_max_size() const;
- size_t capacity_in_bytes() const;
public:
MetaspacePool();
MemoryUsage get_memory_usage();
@@ -232,7 +231,6 @@
};
class CompressedKlassSpacePool : public MemoryPool {
- size_t capacity_in_bytes() const;
public:
CompressedKlassSpacePool();
MemoryUsage get_memory_usage();
--- a/hotspot/src/share/vm/services/memoryService.cpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/services/memoryService.cpp Thu Sep 19 09:26:08 2013 +0200
@@ -409,7 +409,7 @@
mgr->add_pool(_metaspace_pool);
_pools_list->append(_metaspace_pool);
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
_compressed_class_pool = new CompressedKlassSpacePool();
mgr->add_pool(_compressed_class_pool);
_pools_list->append(_compressed_class_pool);
--- a/hotspot/src/share/vm/services/memoryUsage.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/services/memoryUsage.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -63,10 +63,12 @@
size_t committed() const { return _committed; }
size_t max_size() const { return _maxSize; }
+ static size_t undefined_size() { return (size_t) -1; }
+
inline static jlong convert_to_jlong(size_t val) {
// In the 64-bit vm, a size_t can overflow a jlong (which is signed).
jlong ret;
- if (val == (size_t)-1) {
+ if (val == undefined_size()) {
ret = -1L;
} else {
NOT_LP64(ret = val;)
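undefined_size() gives the (size_t)-1 sentinel a name, and convert_to_jlong() maps it to -1L, which is exactly the value java.lang.management.MemoryUsage reports for an undefined quantity. A quick way to see the convention from the Java side (per the memoryPool.cpp change above, the Metaspace pool's max is undefined when MaxMetaspaceSize is not set on the command line):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

public class UndefinedMaxDemo {
    public static void main(String[] args) {
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            if (pool.getName().equals("Metaspace")) {
                long max = pool.getUsage().getMax();
                // -1 is the Java-side encoding of undefined_size()
                System.out.println(max == -1 ? "Metaspace max undefined" : "Metaspace max=" + max);
            }
        }
    }
}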
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp Thu Sep 19 09:26:08 2013 +0200
@@ -52,16 +52,16 @@
inline bool BitMap::par_set_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val | mask;
+ const bm_word_t new_val = old_val | mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+ const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
(volatile void*) addr,
(void*) old_val);
if (cur_val == old_val) {
@@ -73,16 +73,16 @@
inline bool BitMap::par_clear_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = ~bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = ~bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val & mask;
+ const bm_word_t new_val = old_val & mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+ const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
(volatile void*) addr,
(void*) old_val);
if (cur_val == old_val) {
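The par_set_bit/par_clear_bit loops are the standard compare-and-swap retry idiom; the change above only makes the operand type match the map's word type (bm_word_t) instead of the index type (idx_t). As a standalone illustration of the same idiom, here is a Java analog using AtomicLongArray in place of Atomic::cmpxchg_ptr (a sketch, not HotSpot code):

import java.util.concurrent.atomic.AtomicLongArray;

final class ParBitMap {
    private final AtomicLongArray words;

    ParBitMap(int bits) {
        words = new AtomicLongArray((bits + 63) >> 6); // 64 bits per word
    }

    // Returns false if the bit was already set (someone else beat us to it).
    boolean parSetBit(int bit) {
        int index = bit >> 6;
        long mask = 1L << (bit & 63);
        long oldVal = words.get(index);
        while (true) {
            long newVal = oldVal | mask;
            if (newVal == oldVal) {
                return false;                         // already set
            }
            if (words.compareAndSet(index, oldVal, newVal)) {
                return true;                          // our CAS installed the bit
            }
            oldVal = words.get(index);                // lost the race; reload and retry
        }
    }

    public static void main(String[] args) {
        ParBitMap map = new ParBitMap(128);
        System.out.println(map.parSetBit(70));        // true: first setter wins
        System.out.println(map.parSetBit(70));        // false: already set
    }
}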
--- a/hotspot/test/TEST.groups Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/TEST.groups Thu Sep 19 09:26:08 2013 +0200
@@ -62,7 +62,7 @@
#
needs_jdk = \
gc/TestG1ZeroPGCTJcmdThreadPrint.java \
- gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
+ gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
gc/metaspace/TestMetaspacePerfCounters.java \
runtime/6819213/TestBootNativeLibraryPath.java \
runtime/6878713/Test6878713.sh \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestObjectAlignment.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestObjectAlignment
+ * @key gc
+ * @bug 8021823
+ * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs
+ * @library /testlibrary
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 TestObjectAlignment
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestObjectAlignment {
+
+ public static byte[] garbage;
+
+ private static boolean runsOn32bit() {
+ return System.getProperty("sun.arch.data.model").equals("32");
+ }
+
+ public static void main(String[] args) throws Exception {
+ if (runsOn32bit()) {
+ // 32 bit VMs do not allow setting ObjectAlignmentInBytes, so there is nothing to test; the test is still invoked, so simply return.
+ return;
+ }
+ for (int i = 0; i < 10; i++) {
+ garbage = new byte[1000];
+ System.gc();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestAlignmentToUseLargePages
+ * @summary All parallel GC variants may use large pages without requiring the heap
+ * alignment to be large-page aligned. Other collectors must also start up with odd-sized heaps.
+ * @bug 8024396
+ * @key gc
+ * @key regression
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages
+ */
+
+public class TestAlignmentToUseLargePages {
+ public static void main(String args[]) throws Exception {
+ // nothing to do
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestCompressedClassFlags.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug 8015107
+ * @summary Tests that the VM prints a warning when -XX:CompressedClassSpaceSize
+ * is used together with -XX:-UseCompressedClassPointers
+ * @library /testlibrary
+ */
+public class TestCompressedClassFlags {
+ public static void main(String[] args) throws Exception {
+ if (Platform.is64bit()) {
+ OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g",
+ "-XX:-UseCompressedClassPointers",
+ "-version");
+ output.shouldContain("warning");
+ output.shouldNotContain("error");
+ output.shouldHaveExitValue(0);
+ }
+ }
+
+ private static OutputAnalyzer runJava(String ... args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+ return new OutputAnalyzer(pb.start());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,50 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestUseCompressedOopsErgo
+ * @key gc
+ * @bug 8010722
+ * @summary Tests ergonomics for UseCompressedOops.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC
+ */
+
+public class TestUseCompressedOopsErgo {
+
+ public static void main(String args[]) throws Exception {
+ if (!TestUseCompressedOopsErgoTools.is64bitVM()) {
+ // this test is relevant for 64 bit VMs only
+ return;
+ }
+ final String[] gcFlags = args;
+ TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags);
+ }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestUseCompressedOopsErgoTools.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,177 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+class DetermineMaxHeapForCompressedOops {
+ public static void main(String[] args) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ System.out.print(wb.getCompressedOopsMaxHeapSize());
+ }
+}
+
+class TestUseCompressedOopsErgoTools {
+
+ private static long getCompressedClassSpaceSize() {
+ HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
+
+ VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize");
+ return Long.parseLong(option.getValue());
+ }
+
+
+ public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception {
+ OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false);
+ return Long.parseLong(output.getStdout());
+ }
+
+ public static boolean is64bitVM() {
+ String val = System.getProperty("sun.arch.data.model");
+ if (val == null) {
+ throw new RuntimeException("Could not read sun.arch.data.model");
+ }
+ if (val.equals("64")) {
+ return true;
+ } else if (val.equals("32")) {
+ return false;
+ }
+ throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model");
+ }
+
+ /**
+ * Executes a new VM process with the given class and parameters.
+ * @param vmargs Arguments to the VM to run
+ * @param classname Name of the class to run
+ * @param arguments Arguments to the class
+ * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
+ * @return The OutputAnalyzer with the results for the invocation.
+ */
+ public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
+ ArrayList<String> finalargs = new ArrayList<String>();
+
+ String[] whiteboxOpts = new String[] {
+ "-Xbootclasspath/a:.",
+ "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+ "-cp", System.getProperty("java.class.path"),
+ };
+
+ if (useTestDotJavaDotOpts) {
+ // System.getProperty("test.java.opts") is '' if no options is set,
+ // we need to skip such a result
+ String[] externalVMOpts = new String[0];
+ if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
+ externalVMOpts = System.getProperty("test.java.opts").split(" ");
+ }
+ finalargs.addAll(Arrays.asList(externalVMOpts));
+ }
+
+ finalargs.addAll(Arrays.asList(vmargs));
+ finalargs.addAll(Arrays.asList(whiteboxOpts));
+ finalargs.add(classname);
+ finalargs.addAll(Arrays.asList(arguments));
+
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(0);
+ return output;
+ }
+
+ private static String[] join(String[] part1, String part2) {
+ ArrayList<String> result = new ArrayList<String>();
+ result.addAll(Arrays.asList(part1));
+ result.add(part2);
+ return result.toArray(new String[0]);
+ }
+
+ public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
+ long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
+
+ checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
+ checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
+ checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
+
+ // the use of HeapBaseMinAddress should not change the outcome
+ checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
+ checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
+ checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
+
+ // use a different object alignment
+ maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
+
+ checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
+ checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
+ checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
+
+ // use a different CompressedClassSpaceSize
+ String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
+ maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
+
+ checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
+ checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
+ checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
+ }
+
+ private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
+ ArrayList<String> finalargs = new ArrayList<String>();
+ finalargs.addAll(Arrays.asList(args));
+ finalargs.add("-Xmx" + heapsize);
+ finalargs.add("-XX:+PrintFlagsFinal");
+ finalargs.add("-version");
+
+ String output = expectValid(finalargs.toArray(new String[0]));
+
+ boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output);
+
+ Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops);
+ }
+
+ private static boolean getFlagBoolValue(String flag, String where) {
+ Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
+ if (!m.find()) {
+ throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+ }
+ return m.group(1).equals("true");
+ }
+
+ private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(errorcode);
+ return output.getStdout();
+ }
+
+ private static String expectValid(String[] flags) throws Exception {
+ return expect(flags, false, false, 0);
+ }
+}
+
--- a/hotspot/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java Wed Sep 18 12:52:15 2013 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test ClassMetaspaceSizeInJmapHeap
- * @bug 8004924
- * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize
- * @library /testlibrary
- * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap
- */
-
-import com.oracle.java.testlibrary.*;
-import java.nio.file.*;
-import java.io.File;
-import java.nio.charset.Charset;
-import java.util.List;
-
-public class ClassMetaspaceSizeInJmapHeap {
- public static void main(String[] args) throws Exception {
- String pid = Integer.toString(ProcessTools.getProcessId());
-
- JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
- .addToolArg("-heap")
- .addToolArg(pid);
- ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
-
- File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
- pb.redirectOutput(out);
-
- File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt");
- pb.redirectError(err);
-
- run(pb);
-
- OutputAnalyzer output = new OutputAnalyzer(read(out));
- output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)");
- out.delete();
- }
-
- private static void run(ProcessBuilder pb) throws Exception {
- Process p = pb.start();
- p.waitFor();
- int exitValue = p.exitValue();
- if (exitValue != 0) {
- throw new Exception("jmap -heap exited with error code: " + exitValue);
- }
- }
-
- private static String read(File f) throws Exception {
- Path p = f.toPath();
- List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
-
- StringBuilder sb = new StringBuilder();
- for (String line : lines) {
- sb.append(line).append('\n');
- }
- return sb.toString();
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CompressedClassSpaceSizeInJmapHeap
+ * @bug 8004924
+ * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize
+ * @library /testlibrary
+ * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.nio.file.*;
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.List;
+
+public class CompressedClassSpaceSizeInJmapHeap {
+ public static void main(String[] args) throws Exception {
+ String pid = Integer.toString(ProcessTools.getProcessId());
+
+ JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+ .addToolArg("-heap")
+ .addToolArg(pid);
+ ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
+
+ File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt");
+ pb.redirectOutput(out);
+
+ File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt");
+ pb.redirectError(err);
+
+ run(pb);
+
+ OutputAnalyzer output = new OutputAnalyzer(read(out));
+ output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)");
+ out.delete();
+ }
+
+ private static void run(ProcessBuilder pb) throws Exception {
+ Process p = pb.start();
+ p.waitFor();
+ int exitValue = p.exitValue();
+ if (exitValue != 0) {
+ throw new Exception("jmap -heap exited with error code: " + exitValue);
+ }
+ }
+
+ private static String read(File f) throws Exception {
+ Path p = f.toPath();
+ List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
+
+ StringBuilder sb = new StringBuilder();
+ for (String line : lines) {
+ sb.append(line).append('\n');
+ }
+ return sb.toString();
+ }
+}
--- a/hotspot/test/gc/metaspace/TestMetaspaceMemoryPool.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/gc/metaspace/TestMetaspaceMemoryPool.java Thu Sep 19 09:26:08 2013 +0200
@@ -22,55 +22,35 @@
*/
import java.util.List;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryManagerMXBean;
-import java.lang.management.MemoryPoolMXBean;
-import java.lang.management.MemoryUsage;
-
-import java.lang.management.RuntimeMXBean;
-import java.lang.management.ManagementFactory;
+import java.lang.management.*;
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
/* @test TestMetaspaceMemoryPool
* @bug 8000754
 * @summary Tests that a MemoryPoolMXBean is created for metaspace and that a
* MemoryManagerMXBean is created.
+ * @library /testlibrary
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool
*/
public class TestMetaspaceMemoryPool {
public static void main(String[] args) {
verifyThatMetaspaceMemoryManagerExists();
- verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
- if (runsOn64bit()) {
- if (usesCompressedOops()) {
+ boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
+ verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined);
+
+ if (Platform.is64bit()) {
+ if (InputArguments.contains("-XX:+UseCompressedOops")) {
MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
verifyMemoryPool(cksPool, true);
}
}
}
- private static boolean runsOn64bit() {
- return !System.getProperty("sun.arch.data.model").equals("32");
- }
-
- private static boolean usesCompressedOops() {
- return isFlagDefined("+UseCompressedOops");
- }
-
- private static boolean isFlagDefined(String name) {
- RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
- List<String> args = runtimeMxBean.getInputArguments();
- for (String arg : args) {
- if (arg.startsWith("-XX:" + name)) {
- return true;
- }
- }
- return false;
- }
-
private static void verifyThatMetaspaceMemoryManagerExists() {
List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
for (MemoryManagerMXBean manager : managers) {
@@ -95,32 +75,19 @@
private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
MemoryUsage mu = pool.getUsage();
- assertDefined(mu.getInit(), "init");
- assertDefined(mu.getUsed(), "used");
- assertDefined(mu.getCommitted(), "committed");
+ long init = mu.getInit();
+ long used = mu.getUsed();
+ long committed = mu.getCommitted();
+ long max = mu.getMax();
+
+ assertGTE(init, 0L);
+ assertGTE(used, init);
+ assertGTE(committed, used);
if (isMaxDefined) {
- assertDefined(mu.getMax(), "max");
+ assertGTE(max, committed);
} else {
- assertUndefined(mu.getMax(), "max");
- }
- }
-
- private static void assertDefined(long value, String name) {
- assertTrue(value != -1, "Expected " + name + " to be defined");
- }
-
- private static void assertUndefined(long value, String name) {
- assertEquals(value, -1, "Expected " + name + " to be undefined");
- }
-
- private static void assertEquals(long actual, long expected, String msg) {
- assertTrue(actual == expected, msg);
- }
-
- private static void assertTrue(boolean condition, String msg) {
- if (!condition) {
- throw new RuntimeException(msg);
+ assertEQ(max, -1L);
}
}
}
--- a/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java Thu Sep 19 09:26:08 2013 +0200
@@ -33,13 +33,13 @@
* @summary Tests that performance counters for metaspace and compressed class
 * space exist and work.
*
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
*
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
*/
public class TestMetaspacePerfCounters {
public static Class fooClass = null;
@@ -61,10 +61,15 @@
}
private static void checkPerfCounters(String ns) throws Exception {
- for (PerfCounter counter : countersInNamespace(ns)) {
- String msg = "Expected " + counter.getName() + " to be larger than 0";
- assertGT(counter.longValue(), 0L, msg);
- }
+ long minCapacity = getMinCapacity(ns);
+ long maxCapacity = getMaxCapacity(ns);
+ long capacity = getCapacity(ns);
+ long used = getUsed(ns);
+
+ assertGTE(minCapacity, 0L);
+ assertGTE(used, minCapacity);
+ assertGTE(capacity, used);
+ assertGTE(maxCapacity, capacity);
}
private static void checkEmptyPerfCounters(String ns) throws Exception {
@@ -75,12 +80,10 @@
}
private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
- PerfCounter used = PerfCounters.findByName(ns + ".used");
-
- long before = used.longValue();
+ long before = getUsed(ns);
fooClass = compileAndLoad("Foo", "public class Foo { }");
System.gc();
- long after = used.longValue();
+ long after = getUsed(ns);
assertGT(after, before);
}
@@ -99,6 +102,22 @@
}
private static boolean isUsingCompressedClassPointers() {
- return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
+ return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
+ }
+
+ private static long getMinCapacity(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".minCapacity").longValue();
+ }
+
+ private static long getCapacity(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".capacity").longValue();
+ }
+
+ private static long getMaxCapacity(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".maxCapacity").longValue();
+ }
+
+ private static long getUsed(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".used").longValue();
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/TestMetaspaceSizeFlags.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/*
+ * @test TestMetaspaceSizeFlags
+ * @key gc
+ * @bug 8024650
+ * @summary Test that metaspace size flags can be set correctly
+ * @library /testlibrary
+ */
+public class TestMetaspaceSizeFlags {
+ public static final long K = 1024L;
+ public static final long M = 1024L * K;
+
+ // HotSpot uses a number of different values to align memory size flags.
+ // This is currently the largest alignment (unless huge large pages are used).
+ public static final long MAX_ALIGNMENT = 32 * M;
+
+ public static void main(String [] args) throws Exception {
+ testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT);
+ // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
+ testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
+ testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
+ testTooSmallInitialMetaspace(0, 0);
+ testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
+ testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
+ }
+
+ private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+ Asserts.assertEQ(maxMetaspaceSize, metaspaceSize);
+ Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+ Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+ }
+
+ private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+ Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+ Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize);
+ }
+
+ private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+ Asserts.assertGT(maxMetaspaceSize, metaspaceSize);
+ Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize);
+ Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+ Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+ }
+
+ private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+ output.shouldContain("Too small initial Metaspace size");
+ }
+
+ private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+ output.shouldNotMatch("Error occurred during initialization of VM\n.*");
+
+ String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1);
+ String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1);
+
+ return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize),
+ Long.parseLong(stringMetaspaceSize));
+ }
+
+ private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:MaxMetaspaceSize=" + maxMetaspaceSize,
+ "-XX:MetaspaceSize=" + metaspaceSize,
+ "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc.
+ "-XX:+PrintFlagsFinal",
+ "-version");
+ return new OutputAnalyzer(pb.start());
+ }
+
+ private static class MetaspaceFlags {
+ public long maxMetaspaceSize;
+ public long metaspaceSize;
+ public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) {
+ this.maxMetaspaceSize = maxMetaspaceSize;
+ this.metaspaceSize = metaspaceSize;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java Thu Sep 19 09:26:08 2013 +0200
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.lang.management.*;
+
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test TestPerfCountersAndMemoryPools
+ * @bug 8023476
+ * @summary Tests that the MemoryPoolMXBeans and PerfCounters for metaspace
+ * report the same data.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
+ */
+public class TestPerfCountersAndMemoryPools {
+ public static void main(String[] args) throws Exception {
+ checkMemoryUsage("Metaspace", "sun.gc.metaspace");
+
+ if (InputArguments.contains("-XX:+UseCompressedClassPointers") && Platform.is64bit()) {
+ checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
+ }
+ }
+
+ private static MemoryUsage getMemoryUsage(String memoryPoolName) {
+ List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+ for (MemoryPoolMXBean pool : pools) {
+ if (pool.getName().equals(memoryPoolName)) {
+ return pool.getUsage();
+ }
+ }
+
+ throw new RuntimeException("Excpted to find a memory pool with name " +
+ memoryPoolName);
+ }
+
+ private static void checkMemoryUsage(String memoryPoolName, String perfNS)
+ throws Exception {
+ // Need to do a gc before each comparison to update the perf counters
+
+ System.gc();
+ MemoryUsage mu = getMemoryUsage(memoryPoolName);
+ assertEQ(getMinCapacity(perfNS), mu.getInit());
+
+ System.gc();
+ mu = getMemoryUsage(memoryPoolName);
+ assertEQ(getUsed(perfNS), mu.getUsed());
+
+ System.gc();
+ mu = getMemoryUsage(memoryPoolName);
+ assertEQ(getCapacity(perfNS), mu.getCommitted());
+ }
+
+ private static long getMinCapacity(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".minCapacity").longValue();
+ }
+
+ private static long getCapacity(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".capacity").longValue();
+ }
+
+ private static long getUsed(String ns) throws Exception {
+ return PerfCounters.findByName(ns + ".used").longValue();
+ }
+}
--- a/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java Thu Sep 19 09:26:08 2013 +0200
@@ -24,7 +24,7 @@
/*
* @test
* @bug 8003424
- * @summary Testing UseCompressedKlassPointers with CDS
+ * @summary Testing UseCompressedClassPointers with CDS
* @library /testlibrary
* @run main CDSCompressedKPtrs
*/
@@ -36,7 +36,7 @@
ProcessBuilder pb;
if (Platform.is64bit()) {
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+ "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
@@ -44,7 +44,7 @@
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+ "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
--- a/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java Thu Sep 19 09:26:08 2013 +0200
@@ -24,7 +24,7 @@
/*
* @test
* @bug 8003424
- * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
+ * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off.
* @library /testlibrary
* @run main CDSCompressedKPtrsError
*/
@@ -36,7 +36,7 @@
ProcessBuilder pb;
if (Platform.is64bit()) {
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
@@ -44,21 +44,21 @@
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+ "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+ "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+ "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
@@ -71,19 +71,19 @@
// Test bad options with -Xshare:dump.
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");
--- a/hotspot/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java Thu Sep 19 09:26:08 2013 +0200
@@ -25,7 +25,7 @@
* @test
* @bug 8000968
* @key regression
- * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32
+ * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32
* @library /testlibrary
*/
@@ -52,7 +52,7 @@
OutputAnalyzer output;
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UseCompressedKlassPointers",
+ "-XX:+UseCompressedClassPointers",
"-XX:+UseCompressedOops",
"-XX:ObjectAlignmentInBytes=" + alignment,
"-version");
--- a/hotspot/test/testlibrary/OutputAnalyzerTest.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/testlibrary/OutputAnalyzerTest.java Thu Sep 19 09:26:08 2013 +0200
@@ -172,5 +172,22 @@
} catch (RuntimeException e) {
// expected
}
+
+ {
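+ // Verify that firstMatch(String) returns the entire match.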
+ String aaaa = "aaaa";
+ String result = output.firstMatch(aaaa);
+ if (!aaaa.equals(result)) {
+ throw new Exception("firstMatch(String) faild to match. Expected: " + aaaa + " got: " + result);
+ }
+ }
+
+ {
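+ // Verify that firstMatch(String, int) returns the requested capture group.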
+ String aa = "aa";
+ String aa_grouped_aa = aa + "(" + aa + ")";
+ String result = output.firstMatch(aa_grouped_aa, 1);
+ if (!aa.equals(result)) {
+ throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
+ }
+ }
}
}
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java Thu Sep 19 09:26:08 2013 +0200
@@ -41,6 +41,9 @@
/**
* Returns true if {@code arg} is an input argument to the VM.
*
+ * This is useful for checking boolean flags such as -XX:+UseSerialGC or
+ * -XX:-UsePerfData.
+ *
* @param arg The name of the argument.
* @return {@code true} if the given argument is an input argument,
* otherwise {@code false}.
@@ -48,4 +51,26 @@
public static boolean contains(String arg) {
return args.contains(arg);
}
+
+ /**
+ * Returns true if {@code prefix} is the start of an input argument to the
+ * VM.
+ *
+ * This is useful for checking if a flag describing a quantity, such as
+ * -XX:MaxMetaspaceSize=100m, is set without having to know the quantity.
+ * To check if the flag -XX:MaxMetaspaceSize is set, use
+ * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}.
+ *
+ * @param prefix The start of the argument.
+ * @return {@code true} if the given argument is the start of an input
+ * argument, otherwise {@code false}.
+ */
+ public static boolean containsPrefix(String prefix) {
+ for (String arg : args) {
+ if (arg.startsWith(prefix)) {
+ return true;
+ }
+ }
+ return false;
+ }
}
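
A rough usage sketch for the new helper (not part of the patch; the flag name is only illustrative): a test can guard on a quantity flag without knowing its value.

    // Hypothetical: skip the test unless an explicit MaxMetaspaceSize was
    // passed to the VM under test.
    if (!InputArguments.containsPrefix("-XX:MaxMetaspaceSize")) {
        System.out.println("-XX:MaxMetaspaceSize not set; skipping test.");
        return;
    }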
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java Thu Sep 19 09:26:08 2013 +0200
@@ -211,13 +211,13 @@
if (matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
- + "' found in stdout \n");
+ + "' found in stdout: '" + matcher.group() + "' \n");
}
matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
if (matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
- + "' found in stderr \n");
+ + "' found in stderr: '" + matcher.group() + "' \n");
}
}
@@ -254,6 +254,37 @@
}
/**
+ * Get the captured group of the first string matching the pattern.
+ * stderr is searched before stdout.
+ *
+ * @param pattern The multi-line pattern to match
+ * @param group The group to capture
+ * @return The matched string or null if no match was found
+ */
+ public String firstMatch(String pattern, int group) {
+ Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
+ Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
+ if (stderrMatcher.find()) {
+ return stderrMatcher.group(group);
+ }
+ if (stdoutMatcher.find()) {
+ return stdoutMatcher.group(group);
+ }
+ return null;
+ }
+
+ /**
+ * Get the first string matching the pattern.
+ * stderr is searched before stdout.
+ *
+ * @param pattern The multi-line pattern to match
+ * @return The matched string or null if no match was found
+ */
+ public String firstMatch(String pattern) {
+ return firstMatch(pattern, 0);
+ }
+
+ /**
* Verify the exit value of the process
*
* @param expectedExitValue Expected exit value from process
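
A minimal sketch of the new firstMatch overloads (not part of the patch). It assumes the VM under test is started with -XX:+PrintFlagsFinal, whose output format the regex below presumes.

    // Hypothetical: extract a numeric flag value via capture group 1.
    // stderr is searched before stdout, per the firstMatch contract above.
    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+PrintFlagsFinal", "-version");
    OutputAnalyzer output = new OutputAnalyzer(pb.start());
    String size = output.firstMatch("MaxMetaspaceSize\\s*:?=\\s*(\\d+)", 1);
    if (size == null) {
        throw new RuntimeException("MaxMetaspaceSize not found in output");
    }
    System.out.println("MaxMetaspaceSize = " + size);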
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Wed Sep 18 12:52:15 2013 -0400
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Sep 19 09:26:08 2013 +0200
@@ -61,6 +61,8 @@
registerNatives();
}
+ // Get the maximum heap size that still supports compressed oops (COOPs)
+ public native long getCompressedOopsMaxHeapSize();
// Arguments
public native void printHeapSizes();
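
A minimal sketch of the new WhiteBox call (not part of the patch). It assumes the usual WhiteBox test setup: the WhiteBox classes on the boot class path plus -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI.

    // Hypothetical: report the largest heap that still allows compressed oops.
    WhiteBox wb = WhiteBox.getWhiteBox();
    long maxCoopsHeap = wb.getCompressedOopsMaxHeapSize();
    System.out.println("Largest COOPs-compatible heap: "
            + (maxCoopsHeap >> 20) + " MB");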