--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc/shared/Generation.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc/shared/Generation.java Fri Jun 19 13:03:58 2015 +0000
@@ -49,7 +49,6 @@
public abstract class Generation extends VMObject {
private static long reservedFieldOffset;
private static long virtualSpaceFieldOffset;
- private static CIntegerField levelField;
protected static final int K = 1024;
// Fields for class StatRecord
private static Field statRecordField;
@@ -75,7 +74,6 @@
reservedFieldOffset = type.getField("_reserved").getOffset();
virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
- levelField = type.getCIntegerField("_level");
// StatRecord
statRecordField = type.getField("_stat_record");
type = db.lookupType("Generation::StatRecord");
@@ -130,14 +128,6 @@
}
}
- public GenerationSpec spec() {
- return ((GenCollectedHeap) VM.getVM().getUniverse().heap()).spec(level());
- }
-
- public int level() {
- return (int) levelField.getValue(addr);
- }
-
public int invocations() {
return getStatRecord().getInvocations();
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java Fri Jun 19 13:03:58 2015 +0000
@@ -84,11 +84,11 @@
}
public boolean isInNewGen() {
- return ((gen != null) && (gen.level() == 0));
+ return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(0)));
}
public boolean isInOldGen() {
- return ((gen != null) && (gen.level() == 1));
+ return ((gen != null) && (gen == ((GenCollectedHeap)heap).getGen(1)));
}
public boolean inOtherGen() {
@@ -207,8 +207,6 @@
tty.print("In new generation ");
} else if (isInOldGen()) {
tty.print("In old generation ");
- } else if (gen != null) {
- tty.print("In Generation " + getGeneration().level());
} else {
tty.print("In unknown section of Java heap");
}
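
On the VM side this change replaces Generation.level() with identity checks against the heap's two generations; the serviceability-agent code above mirrors helpers that, judging by their uses elsewhere in this patch (gch->is_old_gen(this), GenCollectedHeap::heap()->is_young_gen(gen)), are plain pointer comparisons. A sketch of their presumed shape:

    // Presumed VM-side helpers (sketch, not part of this diff):
    class Generation;
    class GenCollectedHeap {
      Generation* _young_gen;
      Generation* _old_gen;
    public:
      bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
      bool is_old_gen(const Generation* gen)   const { return gen == _old_gen;   }
    };

With exactly two fixed generations, identity carries the same information as the old numeric level.
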
--- a/hotspot/make/bsd/makefiles/dtrace.make Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/make/bsd/makefiles/dtrace.make Fri Jun 19 13:03:58 2015 +0000
@@ -263,14 +263,19 @@
$(DtraceOutDir):
mkdir $(DtraceOutDir)
+# When building using a devkit, dtrace cannot find the correct preprocessor so
+# we run it explicitly before running dtrace.
$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
- $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+ $(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot.d > $(DtraceOutDir)/hotspot.d
+ $(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot.d
$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
- $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+ $(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d > $(DtraceOutDir)/hotspot_jni.d
+ $(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hotspot_jni.d
$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
- $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+ $(QUIETLY) $(CC) -E $(DTRACE_OPTS) -I. -x c $(DTRACE_COMMON_SRCDIR)/hs_private.d > $(DtraceOutDir)/hs_private.d
+ $(QUIETLY) $(DTRACE_PROG) -h -o $@ -s $(DtraceOutDir)/hs_private.d
dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
--- a/hotspot/make/bsd/makefiles/universal.gmk Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/make/bsd/makefiles/universal.gmk Fri Jun 19 13:03:58 2015 +0000
@@ -56,13 +56,14 @@
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
$(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
+LIPO ?= lipo
# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
BUILT_LIPO_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_LIPO_FILES}" ]; then \
$(MKDIR) -p $(shell dirname $@); \
- lipo -create -output $@ $${BUILT_LIPO_FILES}; \
+ $(LIPO) -create -output $@ $${BUILT_LIPO_FILES}; \
fi
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -2888,41 +2888,40 @@
cmp(src1, rscratch1);
}
-void MacroAssembler::store_check(Register obj) {
- // Does a store check for the oop in register obj. The content of
- // register obj is destroyed afterwards.
- store_check_part_1(obj);
- store_check_part_2(obj);
-}
-
void MacroAssembler::store_check(Register obj, Address dst) {
store_check(obj);
}
-
-// split the store check operation so that other instructions can be scheduled inbetween
-void MacroAssembler::store_check_part_1(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- lsr(obj, obj, CardTableModRefBS::card_shift);
-}
-
-void MacroAssembler::store_check_part_2(Register obj) {
+void MacroAssembler::store_check(Register obj) {
+ // Does a store check for the oop in register obj. The content of
+ // register obj is destroyed afterwards.
+
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+
+ CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
- // The calculation for byte_map_base is as follows:
- // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- // So this essentially converts an address to a displacement and
- // it will never need to be relocated.
-
- // FIXME: It's not likely that disp will fit into an offset so we
- // don't bother to check, but it could save an instruction.
- intptr_t disp = (intptr_t) ct->byte_map_base;
- mov(rscratch1, disp);
- strb(zr, Address(obj, rscratch1));
+ lsr(obj, obj, CardTableModRefBS::card_shift);
+
+ assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
+
+ {
+ ExternalAddress cardtable((address) ct->byte_map_base);
+ unsigned long offset;
+ adrp(rscratch1, cardtable, offset);
+ assert(offset == 0, "byte_map_base is misaligned");
+ }
+
+  if (UseCondCardMark) {
+    Label L_already_dirty;
+    membar(StoreLoad);
+    ldrb(rscratch2, Address(obj, rscratch1));
+ cbz(rscratch2, L_already_dirty);
+ strb(zr, Address(obj, rscratch1));
+ bind(L_already_dirty);
+ } else {
+ strb(zr, Address(obj, rscratch1));
+ }
}
void MacroAssembler::load_klass(Register dst, Register src) {
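
The rewritten store_check shifts the oop address right by card_shift to index the byte map, materializes byte_map_base with a single adrp (the assert checks the page alignment that makes offset == 0), and dirties the card. With UseCondCardMark the card byte is loaded first and the store is skipped when the card is already dirty, avoiding write traffic on cache lines shared by many mutators; the membar(StoreLoad) keeps the preceding oop store from being reordered past the card load (the x86 version below guards the same fence with UseConcMarkSweepGC). The same logic in plain C++, as a sketch under assumed constants:

    #include <cstdint>
    typedef signed char jbyte;

    // Sketch only: card_shift (9, i.e. 512-byte cards) and dirty_card_val (0)
    // stand in for CardTableModRefBS internals.
    void card_mark(volatile jbyte* byte_map_base, uintptr_t oop_addr,
                   bool use_cond_card_mark) {
      volatile jbyte* card = byte_map_base + (oop_addr >> 9);
      if (use_cond_card_mark) {
        if (*card != 0) {   // ldrb + cbz: skip the store if already dirty
          *card = 0;        // strb zr
        }
      } else {
        *card = 0;          // unconditional strb zr
      }
    }
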
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -756,10 +756,6 @@
#endif // INCLUDE_ALL_GCS
- // split store_check(Register obj) to enhance instruction interleaving
- void store_check_part_1(Register obj);
- void store_check_part_2(Register obj);
-
// oop manipulations
void load_klass(Register dst, Register src);
void store_klass(Register dst, Register src);
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -4260,31 +4260,24 @@
//////////////////////////////////////////////////////////////////////////////////
+void MacroAssembler::store_check(Register obj, Address dst) {
+ store_check(obj);
+}
+
void MacroAssembler::store_check(Register obj) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
- store_check_part_1(obj);
- store_check_part_2(obj);
-}
-
-void MacroAssembler::store_check(Register obj, Address dst) {
- store_check(obj);
-}
-
-
-// split the store check operation so that other instructions can be scheduled inbetween
-void MacroAssembler::store_check_part_1(Register obj) {
+
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- shrptr(obj, CardTableModRefBS::card_shift);
-}
-
-void MacroAssembler::store_check_part_2(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ shrptr(obj, CardTableModRefBS::card_shift);
+
+ Address card_addr;
+
// The calculation for byte_map_base is as follows:
// byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
// So this essentially converts an address to a displacement and it will
@@ -4292,8 +4285,7 @@
// large for a 32bit displacement.
intptr_t disp = (intptr_t) ct->byte_map_base;
if (is_simm32(disp)) {
- Address cardtable(noreg, obj, Address::times_1, disp);
- movb(cardtable, 0);
+ card_addr = Address(noreg, obj, Address::times_1, disp);
} else {
// By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
// displacement and done in a single instruction given favorable mapping and a
@@ -4301,7 +4293,21 @@
// entry and that entry is not properly handled by the relocation code.
AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
Address index(noreg, obj, Address::times_1);
- movb(as_Address(ArrayAddress(cardtable, index)), 0);
+ card_addr = as_Address(ArrayAddress(cardtable, index));
+ }
+
+ int dirty = CardTableModRefBS::dirty_card_val();
+ if (UseCondCardMark) {
+ Label L_already_dirty;
+ if (UseConcMarkSweepGC) {
+ membar(Assembler::StoreLoad);
+ }
+ cmpb(card_addr, dirty);
+ jcc(Assembler::equal, L_already_dirty);
+ movb(card_addr, dirty);
+ bind(L_already_dirty);
+ } else {
+ movb(card_addr, dirty);
}
}
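
The UseConcMarkSweepGC fence above addresses a Dekker-style race between conditional card marking and CMS precleaning: the mutator performs {store field; load card} while the precleaner performs {store card = clean; rescan fields}, and without a StoreLoad barrier both threads can observe the other's old value, leaving the card clean while the newly stored reference goes unscanned. A minimal ordering sketch with standard atomics (illustrative only; field, card, and the dirty value 0 are assumptions):

    #include <atomic>

    std::atomic<void*>       field;  // a reference field covered by the card
    std::atomic<signed char> card;   // 0 == dirty, nonzero == clean (assumed)

    void mutator_store(void* new_ref) {
      field.store(new_ref, std::memory_order_relaxed);      // the oop store
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad fence
      if (card.load(std::memory_order_relaxed) != 0) {      // cmpb card_addr, dirty
        card.store(0, std::memory_order_relaxed);           // movb card_addr, dirty
      }
    }

The serial and parallel collectors only clean cards at safepoints, so the fence is paid only under CMS.
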
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -315,10 +315,6 @@
#endif // INCLUDE_ALL_GCS
- // split store_check(Register obj) to enhance instruction interleaving
- void store_check_part_1(Register obj);
- void store_check_part_2(Register obj);
-
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
void c2bool(Register x);
--- a/hotspot/src/os/aix/vm/os_aix.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1654,7 +1654,7 @@
}
}
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// cpu
st->print("CPU:");
st->print("total %d", os::processor_count());
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1708,7 +1708,7 @@
os::Posix::print_load_average(st);
}
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}
--- a/hotspot/src/os/linux/vm/os_linux.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -2216,12 +2216,52 @@
st->cr();
}
-void os::pd_print_cpu_info(outputStream* st) {
- st->print("\n/proc/cpuinfo:\n");
- if (!_print_ascii_file("/proc/cpuinfo", st)) {
- st->print(" <Not Available>");
- }
- st->cr();
+// Print the first "model name" line and the first "flags" line
+// that we find and nothing more. We assume "model name" comes
+// before "flags" so if we find a second "model name", then the
+// "flags" field is considered missing.
+static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
+#if defined(IA32) || defined(AMD64)
+ // Other platforms have less repetitive cpuinfo files
+ FILE *fp = fopen("/proc/cpuinfo", "r");
+ if (fp) {
+    // Assume model name comes before flags
+    bool model_name_printed = false;
+    while (!feof(fp)) {
+      if (fgets(buf, buflen, fp)) {
+ if (strstr(buf, "model name") != NULL) {
+ if (!model_name_printed) {
+ st->print_raw("\nCPU Model and flags from /proc/cpuinfo:\n");
+ st->print_raw(buf);
+ model_name_printed = true;
+ } else {
+ // model name printed but not flags? Odd, just return
+ fclose(fp);
+ return true;
+ }
+ }
+ // print the flags line too
+ if (strstr(buf, "flags") != NULL) {
+ st->print_raw(buf);
+ fclose(fp);
+ return true;
+ }
+ }
+ }
+ fclose(fp);
+ }
+#endif // x86 platforms
+ return false;
+}
+
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
+ // Only print the model name if the platform provides this as a summary
+ if (!print_model_name_and_flags(st, buf, buflen)) {
+ st->print("\n/proc/cpuinfo:\n");
+ if (!_print_ascii_file("/proc/cpuinfo", st)) {
+ st->print_cr(" <Not Available>");
+ }
+ }
}
void os::print_siginfo(outputStream* st, void* siginfo) {
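
Threading (buf, buflen) through pd_print_cpu_info lets every platform share one caller-owned scratch line buffer instead of carving its own off the stack, which matters when these hooks run during error reporting. A presumed call site (sketch; the shared os::print_cpu_info wrapper and the buffer size are assumptions):

    // Sketch: the generic layer owns the scratch buffer and forwards it.
    char buf[2048];
    os::print_cpu_info(st, buf, sizeof(buf));  // dispatches to pd_print_cpu_info(st, buf, buflen)
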
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1997,7 +1997,7 @@
return status;
}
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}
--- a/hotspot/src/os/windows/vm/os_windows.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1733,7 +1733,7 @@
st->cr();
}
-void os::pd_print_cpu_info(outputStream* st) {
+void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do for now.
}
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1619,6 +1619,9 @@
LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
+ if (UseConcMarkSweepGC) {
+ __ membar_storeload();
+ }
__ move(card_addr, cur_value);
LabelObj* L_already_dirty = new LabelObj();
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -949,8 +949,7 @@
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
- parsed_annotations,
- CHECK);
+ parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
@@ -1643,7 +1642,6 @@
index = skip_annotation(buffer, limit, index);
break;
default:
- assert(false, "annotation tag");
return limit; // bad tag byte
}
return index;
@@ -1651,8 +1649,7 @@
// Sift through annotations, looking for those significant to the VM:
void ClassFileParser::parse_annotations(u1* buffer, int limit,
- ClassFileParser::AnnotationCollector* coll,
- TRAPS) {
+ ClassFileParser::AnnotationCollector* coll) {
// annotations := do(nann:u2) {annotation}
int index = 0;
if ((index += 2) >= limit) return; // read nann
@@ -2280,8 +2277,7 @@
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
- runtime_visible_annotations_length, &parsed_annotations,
- CHECK_(nullHandle));
+ runtime_visible_annotations_length, &parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
@@ -2945,8 +2941,7 @@
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
- parsed_annotations,
- CHECK);
+ parsed_annotations);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -294,8 +294,7 @@
int skip_annotation_value(u1* buffer, int limit, int index);
void parse_annotations(u1* buffer, int limit,
/* Results (currently, only one result is supported): */
- AnnotationCollector* result,
- TRAPS);
+ AnnotationCollector* result);
// Final setup
unsigned int compute_oop_map_count(instanceKlassHandle super,
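
Dropping TRAPS here is not cosmetic. TRAPS and CHECK are HotSpot's exception-plumbing macros, roughly (quoting utilities/exceptions.hpp from memory, so treat as a sketch):

    #define TRAPS  Thread* THREAD
    #define CHECK  THREAD); if (HAS_PENDING_EXCEPTION) return; (void)(0

so every parse_annotations(..., CHECK) call carried a hidden THREAD argument plus a pending-exception test and early return at the call site. Since parse_annotations cannot throw, that plumbing was dead weight, and removing it lets callers drop CHECK and CHECK_(nullHandle) as the hunks above show.
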
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -190,10 +190,10 @@
};
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
- ReservedSpace rs, size_t initial_byte_size, int level,
+ ReservedSpace rs, size_t initial_byte_size,
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
- CardGeneration(rs, initial_byte_size, level, ct),
+ CardGeneration(rs, initial_byte_size, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_did_compact(false)
{
@@ -682,12 +682,17 @@
void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (PrintGCDetails) {
+ // I didn't want to change the logging when removing the level concept,
+ // but I guess this logging could say "old" or something instead of "1".
+ assert(gch->is_old_gen(this),
+ "The CMS generation should be the old generation");
+ uint level = 1;
if (Verbose) {
- gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
- level(), short_name(), s, used(), capacity());
+ gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
+ level, short_name(), s, used(), capacity());
} else {
- gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
- level(), short_name(), s, used() / K, capacity() / K);
+ gclog_or_tty->print("[%u %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
+ level, short_name(), s, used() / K, capacity() / K);
}
}
if (Verbose) {
@@ -797,27 +802,22 @@
gclog_or_tty->print_cr("\nFrom compute_new_size: ");
gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
gclog_or_tty->print_cr(" Desired free fraction %f",
- desired_free_percentage);
+ desired_free_percentage);
gclog_or_tty->print_cr(" Maximum free fraction %f",
- maximum_free_percentage);
+ maximum_free_percentage);
gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
- desired_capacity/1000);
- int prev_level = level() - 1;
- if (prev_level >= 0) {
- size_t prev_size = 0;
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- Generation* prev_gen = gch->young_gen();
- prev_size = prev_gen->capacity();
- gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
- prev_size/1000);
- }
+ desired_capacity/1000);
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
+ size_t young_size = gch->young_gen()->capacity();
+ gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
- unsafe_max_alloc_nogc()/1000);
+ unsafe_max_alloc_nogc()/1000);
gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
- contiguous_available()/1000);
+ contiguous_available()/1000);
gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
- expand_bytes);
+ expand_bytes);
}
// safe if expansion fails
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
@@ -1650,8 +1650,7 @@
_intra_sweep_estimate.padded_average());
}
- GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
- ref_processor(), clear_all_soft_refs);
+ GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
#ifdef ASSERT
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
size_t free_size = cms_space->free();
@@ -2432,7 +2431,7 @@
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- _cmsGen->level(),
+ GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@@ -2504,7 +2503,7 @@
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- _cmsGen->level(),
+ GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@@ -3031,7 +3030,7 @@
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- _cmsGen->level(),
+ GenCollectedHeap::OldGen,
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@@ -4282,15 +4281,12 @@
FlagSetting fl(gch->_is_gc_active, false);
NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
- int level = _cmsGen->level() - 1;
- if (level >= 0) {
- gch->do_collection(true, // full (i.e. force, see below)
- false, // !clear_all_soft_refs
- 0, // size
- false, // is_tlab
- level // max_level
- );
- }
+ gch->do_collection(true, // full (i.e. force, see below)
+ false, // !clear_all_soft_refs
+ 0, // size
+ false, // is_tlab
+ GenCollectedHeap::YoungGen // type
+ );
}
FreelistLocker x(this);
MutexLockerEx y(bitMapLock(),
@@ -4464,7 +4460,7 @@
CLDToOopClosure cld_closure(&par_mri_cl, true);
gch->gen_process_roots(_strong_roots_scope,
- _collector->_cmsGen->level(),
+ GenCollectedHeap::OldGen,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
@@ -4603,7 +4599,7 @@
_timer.reset();
_timer.start();
gch->gen_process_roots(_strong_roots_scope,
- _collector->_cmsGen->level(),
+ GenCollectedHeap::OldGen,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
@@ -5184,7 +5180,7 @@
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- _cmsGen->level(),
+ GenCollectedHeap::OldGen,
true, // younger gens as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@@ -5648,11 +5644,12 @@
return _cmsSpace->find_chunk_at_end();
}
-void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
+void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
bool full) {
- // The next lower level has been collected. Gather any statistics
+ // If the young generation has been collected, gather any statistics
// that are of interest at this point.
- if (!full && (current_level + 1) == level()) {
+ bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+ if (!full && current_is_young) {
// Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used());
}
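
The GenCollectedHeap::YoungGen and GenCollectedHeap::OldGen constants used throughout these hunks come from the GenerationType enum that replaces the integer level; its declaration is implied by the GenerationType parameters later in this patch (sketch):

    // Presumed declaration in GenCollectedHeap (sketch):
    enum GenerationType {
      YoungGen,
      OldGen
    };

Call sites such as gen_process_roots(..., GenCollectedHeap::OldGen, ...) are now self-describing where a bare 0 or 1 was not.
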
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -1063,7 +1063,7 @@
void shrink_free_list_by(size_t bytes);
// Update statistics for GC
- virtual void update_gc_stats(int level, bool full);
+ virtual void update_gc_stats(Generation* current_generation, bool full);
// Maximum available space in the generation (including uncommitted)
// space.
@@ -1079,7 +1079,7 @@
public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
- int level, CardTableRS* ct,
+ CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
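
update_gc_stats now receives the generation that was just collected rather than its level. The heap presumably fans the notification out to both generations, each filtering for the events it cares about, as the is_young_gen tests in the CMS and Tenured implementations suggest (sketch of the assumed dispatcher):

    // Assumed GenCollectedHeap dispatcher (sketch): both generations receive
    // the notification and filter on the collected generation themselves.
    void GenCollectedHeap::update_gc_stats(Generation* current_generation, bool full) {
      _young_gen->update_gc_stats(current_generation, full);
      _old_gen->update_gc_stats(current_generation, full);
    }
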
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -62,25 +62,25 @@
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
- ParNewGeneration* gen_,
+ ParNewGeneration* young_gen_,
Generation* old_gen_,
int thread_num_,
ObjToScanQueueSet* work_queue_set_,
Stack<oop, mtGC>* overflow_stacks_,
size_t desired_plab_sz_,
ParallelTaskTerminator& term_) :
- _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
+ _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
_overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
_ageTable(false), // false ==> not the global age table, no perf data.
_to_space_alloc_buffer(desired_plab_sz_),
- _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
- _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
- _older_gen_closure(gen_, this),
+ _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
+ _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
+ _older_gen_closure(young_gen_, this),
_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
- &_to_space_root_closure, gen_, &_old_gen_root_closure,
+ &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
work_queue_set_, &term_),
- _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
+ _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
_strong_roots_time(0.0), _term_time(0.0)
{
@@ -481,7 +481,6 @@
ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
- assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end();
}
@@ -566,11 +565,11 @@
par_scan_state()->end_term_time();
}
-ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"),
- _gen(gen), _old_gen(old_gen),
+ _young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set),
_strong_roots_scope(strong_roots_scope)
@@ -596,7 +595,7 @@
par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope,
- _gen->level(),
+ GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
@@ -616,8 +615,8 @@
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
- : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
+ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
+ : DefNewGeneration(rs, initial_byte_size, "PCopy"),
_overflow_list(NULL),
_is_alive_closure(this),
_plab_stats(YoungPLABSize, PLABWeight)
@@ -752,7 +751,7 @@
private:
virtual void work(uint worker_id);
private:
- ParNewGeneration& _gen;
+ ParNewGeneration& _young_gen;
ProcessTask& _task;
Generation& _old_gen;
HeapWord* _young_old_boundary;
@@ -760,12 +759,12 @@
};
ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
- ParNewGeneration& gen,
+ ParNewGeneration& young_gen,
Generation& old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet& state_set)
: AbstractGangTask("ParNewGeneration parallel reference processing"),
- _gen(gen),
+ _young_gen(young_gen),
_task(task),
_old_gen(old_gen),
_young_old_boundary(young_old_boundary),
@@ -806,12 +805,12 @@
GenCollectedHeap* gch = GenCollectedHeap::heap();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
- _state_set.reset(workers->active_workers(), _generation.promotion_failed());
- ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
- _generation.reserved().end(), _state_set);
+ _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
+ ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
+ _young_gen.reserved().end(), _state_set);
workers->run_task(&rp_task);
_state_set.reset(0 /* bad value in debug if not reset */,
- _generation.promotion_failed());
+ _young_gen.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@@ -835,10 +834,10 @@
ScanClosure(g, gc_barrier) {}
EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur,
OopsInGenClosure* older) :
- _gch(gch), _level(level),
+ _gch(gch),
_scan_cur_or_nonheap(cur), _scan_older(older)
{}
@@ -846,10 +845,10 @@
do {
// Beware: this call will lead to closure applications via virtual
// calls.
- _gch->oop_since_save_marks_iterate(_level,
+ _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
_scan_cur_or_nonheap,
_scan_older);
- } while (!_gch->no_allocs_since_save_marks(_level));
+ } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
}
@@ -972,14 +971,14 @@
ScanClosure scan_without_gc_barrier(this, false);
ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
- EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
+ EvacuateFollowersClosureGeneral evacuate_followers(gch,
&scan_without_gc_barrier, &scan_with_gc_barrier);
rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
- ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+ ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, &task_executor,
_gc_timer, _gc_tracer.gc_id());
@@ -1045,7 +1044,7 @@
rp->set_enqueuing_is_done(true);
if (rp->processing_is_mt()) {
- ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+ ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
rp->enqueue_discovered_references(&task_executor);
} else {
rp->enqueue_discovered_references(NULL);
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -234,14 +234,14 @@
class ParNewGenTask: public AbstractGangTask {
private:
- ParNewGeneration* _gen;
+ ParNewGeneration* _young_gen;
Generation* _old_gen;
HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
StrongRootsScope* _strong_roots_scope;
public:
- ParNewGenTask(ParNewGeneration* gen,
+ ParNewGenTask(ParNewGeneration* young_gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set,
@@ -264,11 +264,10 @@
class EvacuateFollowersClosureGeneral: public VoidClosure {
private:
GenCollectedHeap* _gch;
- int _level;
OopsInGenClosure* _scan_cur_or_nonheap;
OopsInGenClosure* _scan_older;
public:
- EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
OopsInGenClosure* cur,
OopsInGenClosure* older);
virtual void do_void();
@@ -288,12 +287,14 @@
// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
- ParNewGeneration& _generation;
+ ParNewGeneration& _young_gen;
+ Generation& _old_gen;
ParScanThreadStateSet& _state_set;
public:
- ParNewRefProcTaskExecutor(ParNewGeneration& generation,
+ ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
+ Generation& old_gen,
ParScanThreadStateSet& state_set)
- : _generation(generation), _state_set(state_set)
+ : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
{ }
// Executes a task using worker threads.
@@ -353,7 +354,7 @@
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
public:
- ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
+ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
~ParNewGeneration() {
for (uint i = 0; i < ParallelGCThreads; i++)
--- a/hotspot/src/share/vm/gc/cms/parOopClosures.inline.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parOopClosures.inline.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -72,7 +72,7 @@
bool root_scan) {
assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
- && (generation()->level() == 0 || gc_barrier),
+ && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
"The gen must be right, and we must be doing the barrier "
"in older generations.");
T heap_oop = oopDesc::load_heap_oop(p);
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -198,8 +198,7 @@
assert(SafepointSynchronize::is_at_safepoint(),
"We can only be executing this arm of if at a safepoint");
GCCauseSetter gccs(gch, _gc_cause);
- gch->do_full_collection(gch->must_clear_all_soft_refs(),
- 0 /* collect only youngest gen */);
+ gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
} // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) ||
(GC_locker::is_active() /* gc may have been skipped */
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -227,7 +227,7 @@
size_t word_sz,
AllocationContext_t context) {
G1PLAB* buffer = alloc_buffer(dest, context);
- if (_survivor_alignment_bytes == 0) {
+ if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
return buffer->allocate(word_sz);
} else {
return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
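
The added !dest.is_young() test stops old-generation PLAB allocations from paying the survivor-alignment tax: allocate_aligned rounds each allocation up to _survivor_alignment_bytes, and that padding only buys anything for objects placed in survivor space. Worst-case waste per allocation, as a sketch (names are assumptions):

    // Worst-case padding of one aligned PLAB allocation, in words (sketch).
    size_t max_padding_words(size_t survivor_alignment_bytes, size_t heap_word_size) {
      return survivor_alignment_bytes / heap_word_size - 1;  // e.g. 64 / 8 - 1 = 7 words
    }
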
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1304,7 +1304,7 @@
size_t survived_guess = survived + promoted;
_avg_survived->sample(survived_guess);
}
- avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
+ avg_promoted()->sample(promoted);
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print_cr(
--- a/hotspot/src/share/vm/gc/parallel/psOldGen.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psOldGen.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -199,7 +199,7 @@
// Allocations in the old generation need to be reported
if (res != NULL) {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- heap->size_policy()->tenured_allocation(word_size);
+ heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
}
return res;
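
This is a units fix: collector-internal allocation sizes are counted in HeapWords, while the adaptive size policy samples bytes, so passing the raw word count under-reported tenured allocation by a factor of HeapWordSize (HotSpot's bytes-per-HeapWord constant, 8 on LP64). The conversion at a glance:

    size_t word_size = 512;                       // allocation size in HeapWords
    size_t byte_size = word_size * HeapWordSize;  // 512 * 8 = 4096 bytes on LP64
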
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -58,11 +58,13 @@
// Methods of protected closure types.
-DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
- assert(g->level() == 0, "Optimized for youngest gen.");
+DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
+ assert(_young_gen->kind() == Generation::ParNew ||
+ _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}
+
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
- return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
+ return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}
DefNewGeneration::KeepAliveClosure::
@@ -85,39 +87,38 @@
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::EvacuateFollowersClosure::
-EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
- ScanClosure* cur, ScanClosure* older) :
- _gch(gch), _level(level),
- _scan_cur_or_nonheap(cur), _scan_older(older)
+EvacuateFollowersClosure(GenCollectedHeap* gch,
+ ScanClosure* cur,
+ ScanClosure* older) :
+ _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
do {
- _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
- _scan_older);
- } while (!_gch->no_allocs_since_save_marks(_level));
+ _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
+  } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
}
DefNewGeneration::FastEvacuateFollowersClosure::
-FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
- DefNewGeneration* gen,
- FastScanClosure* cur, FastScanClosure* older) :
- _gch(gch), _level(level), _gen(gen),
- _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
+FastEvacuateFollowersClosure(GenCollectedHeap* gch,
+ FastScanClosure* cur,
+ FastScanClosure* older) :
+ _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
+{
+ assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
+ _gen = (DefNewGeneration*)_gch->young_gen();
+}
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
do {
- _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
- _scan_older);
- } while (!_gch->no_allocs_since_save_marks(_level));
+ _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
+  } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
- assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end();
}
@@ -127,7 +128,6 @@
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
- assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end();
}
@@ -168,7 +168,6 @@
ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
_g(g)
{
- assert(_g->level() == 0, "Optimized for youngest generation");
_boundary = _g->reserved().end();
}
@@ -186,9 +185,8 @@
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,
- int level,
const char* policy)
- : Generation(rs, initial_size, level),
+ : Generation(rs, initial_size),
_promo_failure_drain_in_progress(false),
_should_allocate_from_space(false)
{
@@ -372,22 +370,18 @@
return success;
}
-
void DefNewGeneration::compute_new_size() {
- // This is called after a gc that includes the following generation
- // (which is required to exist.) So from-space will normally be empty.
+ // This is called after a GC that includes the old generation, so from-space
+ // will normally be empty.
// Note that we check both spaces, since if scavenge failed they revert roles.
- // If not we bail out (otherwise we would have to relocate the objects)
+ // If not we bail out (otherwise we would have to relocate the objects).
if (!from()->is_empty() || !to()->is_empty()) {
return;
}
- int next_level = level() + 1;
GenCollectedHeap* gch = GenCollectedHeap::heap();
- assert(next_level == 1, "DefNewGeneration must be a young gen");
- Generation* old_gen = gch->old_gen();
- size_t old_size = old_gen->capacity();
+ size_t old_size = gch->old_gen()->capacity();
size_t new_size_before = _virtual_space.committed_size();
size_t min_new_size = spec()->init_size();
size_t max_new_size = reserved().byte_size();
@@ -603,7 +597,7 @@
gch->rem_set()->prepare_for_younger_refs_iterate(false);
- assert(gch->no_allocs_since_save_marks(0),
+  assert(gch->no_allocs_since_save_marks(true /* include_young */),
"save marks have not been newly set.");
// Not very pretty.
@@ -619,11 +613,11 @@
false);
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
- FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
+ FastEvacuateFollowersClosure evacuate_followers(gch,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier);
- assert(gch->no_allocs_since_save_marks(0),
+  assert(gch->no_allocs_since_save_marks(true /* include_young */),
"save marks have not been newly set.");
{
@@ -633,7 +627,7 @@
StrongRootsScope srs(0);
gch->gen_process_roots(&srs,
- _level,
+ GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
@@ -870,8 +864,10 @@
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words) {
- if (requestor == this || _promotion_failed) return;
- assert(requestor->level() > level(), "DefNewGeneration must be youngest");
+ if (requestor == this || _promotion_failed) {
+ return;
+ }
+ assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
/* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
if (to_space->top() > to_space->bottom()) {
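
A note on the no_allocs_since_save_marks calls above: with the level parameter gone, its argument is presumably a plain bool, as the parNewGeneration.cpp hunk spells out with no_allocs_since_save_marks(true /* include_young */). Passing the enum constant GenCollectedHeap::YoungGen here would compile but silently convert 0 to false and skip exactly the young-generation check these asserts exist for, hence the explicit true. Presumed declaration (sketch):

    // Presumed GenCollectedHeap declaration after this change (sketch):
    bool no_allocs_since_save_marks(bool include_young);
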
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -154,9 +154,9 @@
public: // was "protected" but caused compile error on win32
class IsAliveClosure: public BoolObjectClosure {
- Generation* _g;
+ Generation* _young_gen;
public:
- IsAliveClosure(Generation* g);
+ IsAliveClosure(Generation* young_gen);
bool do_object_b(oop p);
};
@@ -183,31 +183,28 @@
class EvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch;
- int _level;
ScanClosure* _scan_cur_or_nonheap;
ScanClosure* _scan_older;
public:
- EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
+ EvacuateFollowersClosure(GenCollectedHeap* gch,
ScanClosure* cur, ScanClosure* older);
void do_void();
};
class FastEvacuateFollowersClosure: public VoidClosure {
GenCollectedHeap* _gch;
- int _level;
DefNewGeneration* _gen;
FastScanClosure* _scan_cur_or_nonheap;
FastScanClosure* _scan_older;
public:
- FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
- DefNewGeneration* gen,
+ FastEvacuateFollowersClosure(GenCollectedHeap* gch,
FastScanClosure* cur,
FastScanClosure* older);
void do_void();
};
public:
- DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+ DefNewGeneration(ReservedSpace rs, size_t initial_byte_size,
const char* policy="Copy");
virtual void ref_processor_init();
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -36,6 +36,7 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/generation.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
@@ -53,8 +54,7 @@
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
-void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
- guarantee(level == 1, "We always collect both old and young.");
+void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -87,11 +87,11 @@
// Capture used regions for each generation that will be
// subject to collection, so that card table adjustments can
// be made intelligently (see clear / invalidate further below).
- gch->save_used_regions(level);
+ gch->save_used_regions();
allocate_stacks();
- mark_sweep_phase1(level, clear_all_softrefs);
+ mark_sweep_phase1(clear_all_softrefs);
mark_sweep_phase2();
@@ -99,7 +99,7 @@
COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
- mark_sweep_phase3(level);
+ mark_sweep_phase3();
mark_sweep_phase4();
@@ -184,8 +184,7 @@
_objarray_stack.clear(true);
}
-void GenMarkSweep::mark_sweep_phase1(int level,
- bool clear_all_softrefs) {
+void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
@@ -195,7 +194,6 @@
// use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors
// are run.
- assert(level == 1, "We don't use mark-sweep on young generations");
follow_root_closure.set_orig_generation(gch->old_gen());
// Need new claim bits before marking starts.
@@ -205,10 +203,10 @@
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- level,
+ GenCollectedHeap::OldGen,
false, // Younger gens are not roots.
GenCollectedHeap::SO_None,
- GenCollectedHeap::StrongRootsOnly,
+ ClassUnloading,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);
@@ -273,7 +271,7 @@
}
};
-void GenMarkSweep::mark_sweep_phase3(int level) {
+void GenMarkSweep::mark_sweep_phase3() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations
@@ -286,14 +284,13 @@
// use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors
// are run.
- assert(level == 1, "We don't use mark-sweep on young generations.");
adjust_pointer_closure.set_orig_generation(gch->old_gen());
{
StrongRootsScope srs(1);
gch->gen_process_roots(&srs,
- level,
+ GenCollectedHeap::OldGen,
false, // Younger gens are not roots.
GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -31,17 +31,16 @@
friend class VM_MarkSweep;
friend class G1MarkSweep;
public:
- static void invoke_at_safepoint(int level, ReferenceProcessor* rp,
- bool clear_all_softrefs);
+ static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs);
private:
// Mark live objects
- static void mark_sweep_phase1(int level, bool clear_all_softrefs);
+ static void mark_sweep_phase1(bool clear_all_softrefs);
// Calculate new addresses
static void mark_sweep_phase2();
// Update pointers
- static void mark_sweep_phase3(int level);
+ static void mark_sweep_phase3();
// Move objects to new positions
static void mark_sweep_phase4();
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -41,9 +41,9 @@
#endif
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
- size_t initial_byte_size, int level,
+ size_t initial_byte_size,
GenRemSet* remset) :
- CardGeneration(rs, initial_byte_size, level, remset)
+ CardGeneration(rs, initial_byte_size, remset)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
@@ -134,11 +134,12 @@
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}
-void TenuredGeneration::update_gc_stats(int current_level,
+void TenuredGeneration::update_gc_stats(Generation* current_generation,
bool full) {
- // If the next lower level(s) has been collected, gather any statistics
+ // If the young generation has been collected, gather any statistics
// that are of interest at this point.
- if (!full && (current_level + 1) == level()) {
+ bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+ if (!full && current_is_young) {
// Calculate size of data promoted from the younger generations
// before doing the collection.
size_t used_before_gc = used();
@@ -192,7 +193,7 @@
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
- GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+ GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
gc_timer->register_gc_end();
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -55,8 +55,9 @@
void assert_correct_size_change_locking();
public:
- TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
- int level, GenRemSet* remset);
+ TenuredGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
+ GenRemSet* remset);
Generation::Name kind() { return Generation::MarkSweepCompact; }
@@ -120,7 +121,7 @@
// Statistics
- virtual void update_gc_stats(int level, bool full);
+ virtual void update_gc_stats(Generation* current_generation, bool full);
virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
--- a/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -35,10 +35,10 @@
#include "memory/memRegion.hpp"
#include "runtime/java.hpp"
-CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
- int level,
+CardGeneration::CardGeneration(ReservedSpace rs,
+ size_t initial_byte_size,
GenRemSet* remset) :
- Generation(rs, initial_byte_size, level), _rs(remset),
+ Generation(rs, initial_byte_size), _rs(remset),
_shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue()
{
--- a/hotspot/src/share/vm/gc/shared/cardGeneration.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardGeneration.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -52,8 +52,7 @@
size_t _capacity_at_prologue;
size_t _used_at_prologue;
- CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
- GenRemSet* remset);
+ CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
virtual void assert_correct_size_change_locking() = 0;
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -104,7 +104,9 @@
void CardTableRS::younger_refs_iterate(Generation* g,
OopsInGenClosure* blk,
uint n_threads) {
- _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
+ // The indexing in this array is slightly odd. We want to access
+ // the old generation record here, which is at index 2.
+ _last_cur_val_in_gen[2] = cur_youngergen_card_val();
g->younger_refs_iterate(blk, n_threads);
}
@@ -300,7 +302,8 @@
}
void CardTableRS::clear_into_younger(Generation* old_gen) {
- assert(old_gen->level() == 1, "Should only be called for the old generation");
+ assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
+ "Should only be called for the old generation");
// The card tables for the youngest gen need never be cleared.
// There's a bit of subtlety in the clear() and invalidate()
// methods that we exploit here and in invalidate_or_clear()
@@ -311,7 +314,8 @@
}
void CardTableRS::invalidate_or_clear(Generation* old_gen) {
- assert(old_gen->level() == 1, "Should only be called for the old generation");
+ assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
+ "Should only be called for the old generation");
// Invalidate the cards for the currently occupied part of
// the old generation and clear the cards for the
// unoccupied part of the generation (if any, making use
@@ -377,7 +381,9 @@
VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
void do_generation(Generation* gen) {
// Skip the youngest generation.
- if (gen->level() == 0) return;
+ if (GenCollectedHeap::heap()->is_young_gen(gen)) {
+ return;
+ }
// Normally, we're interested in pointers to younger generations.
VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
gen->space_iterate(&blk, true);
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -76,9 +76,8 @@
// An array that contains, for each generation, the card table value last
// used as the current value for a younger_refs_do iteration of that
- // portion of the table. (The perm gen is index 0; other gens are at
- // their level plus 1. They youngest gen is in the table, but will
- // always have the value "clean_card".)
+ // portion of the table. The perm gen is index 0. The young gen is index 1,
+ // but will always have the value "clean_card". The old gen is index 2.
jbyte* _last_cur_val_in_gen;
jbyte _cur_youngergen_card_val;
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -746,11 +746,11 @@
return result; // Could be null if we are out of space.
} else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
// Do an incremental collection.
- gch->do_collection(false /* full */,
- false /* clear_all_soft_refs */,
- size /* size */,
- is_tlab /* is_tlab */,
- number_of_generations() - 1 /* max_level */);
+ gch->do_collection(false, // full
+ false, // clear_all_soft_refs
+ size, // size
+ is_tlab, // is_tlab
+ GenCollectedHeap::OldGen); // max_generation
} else {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print(" :: Trying full because partial may fail :: ");
@@ -759,11 +759,11 @@
// for the original code and why this has been simplified
// with from-space allocation criteria modified and
// such allocation moved out of the safepoint path.
- gch->do_collection(true /* full */,
- false /* clear_all_soft_refs */,
- size /* size */,
- is_tlab /* is_tlab */,
- number_of_generations() - 1 /* max_level */);
+ gch->do_collection(true, // full
+ false, // clear_all_soft_refs
+ size, // size
+ is_tlab, // is_tlab
+ GenCollectedHeap::OldGen); // max_generation
}
result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
@@ -787,11 +787,11 @@
{
UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
- gch->do_collection(true /* full */,
- true /* clear_all_soft_refs */,
- size /* size */,
- is_tlab /* is_tlab */,
- number_of_generations() - 1 /* max_level */);
+ gch->do_collection(true, // full
+ true, // clear_all_soft_refs
+ size, // size
+ is_tlab, // is_tlab
+ GenCollectedHeap::OldGen); // max_generation
}
result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -261,8 +261,6 @@
size_t initial_old_size() { return _initial_old_size; }
size_t max_old_size() { return _max_old_size; }
- int number_of_generations() { return 2; }
-
GenerationSpec* young_gen_spec() const {
assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized");
return _young_gen_spec;
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -127,11 +127,11 @@
set_barrier_set(rem_set()->bs());
ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
- _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
+ _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
- _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
+ _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
clear_incremental_collection_failed();
#if INCLUDE_ALL_GCS
@@ -202,12 +202,8 @@
return _young_gen->used() + _old_gen->used();
}
-// Save the "used_region" for generations level and lower.
-void GenCollectedHeap::save_used_regions(int level) {
- assert(level == 0 || level == 1, "Illegal level parameter");
- if (level == 1) {
- _old_gen->save_used_region();
- }
+void GenCollectedHeap::save_used_regions() {
+ _old_gen->save_used_region();
_young_gen->save_used_region();
}
@@ -337,8 +333,16 @@
record_gen_tops_before_GC();
if (PrintGC && Verbose) {
- gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
- gen->level(),
+ // I didn't want to change the logging when removing the level concept,
+ // but I guess this logging could say young/old or something instead of 0/1.
+ uint level;
+ if (heap()->is_young_gen(gen)) {
+ level = 0;
+ } else {
+ level = 1;
+ }
+ gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
+ level,
gen->stat_record()->invocations,
size * HeapWordSize);
}
@@ -399,7 +403,7 @@
gen->stat_record()->accumulated_time.stop();
- update_gc_stats(gen->level(), full);
+ update_gc_stats(gen, full);
if (run_verification && VerifyAfterGC) {
HandleMark hm; // Discard invalid handles created during verification
@@ -412,11 +416,11 @@
}
}
-void GenCollectedHeap::do_collection(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab,
- int max_level) {
+void GenCollectedHeap::do_collection(bool full,
+ bool clear_all_soft_refs,
+ size_t size,
+ bool is_tlab,
+ GenerationType max_generation) {
ResourceMark rm;
DEBUG_ONLY(Thread* my_thread = Thread::current();)
@@ -444,7 +448,7 @@
{
FlagSetting fl(_is_gc_active, true);
- bool complete = full && (max_level == 1 /* old */);
+ bool complete = full && (max_generation == OldGen);
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
@@ -458,9 +462,8 @@
bool run_verification = total_collections() >= VerifyGCStartAt;
bool prepared_for_verification = false;
- int max_level_collected = 0;
- bool old_collects_young = (max_level == 1) &&
- full &&
+ bool collected_old = false;
+ bool old_collects_young = complete &&
_old_gen->full_collects_younger_generations();
if (!old_collects_young &&
_young_gen->should_collect(full, size, is_tlab)) {
@@ -487,7 +490,7 @@
bool must_restore_marks_for_biased_locking = false;
- if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
+ if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
if (!complete) {
// The full_collections increment was missed above.
increment_total_full_collections();
@@ -510,13 +513,13 @@
true);
must_restore_marks_for_biased_locking = true;
- max_level_collected = 1;
+ collected_old = true;
}
// Update "complete" boolean wrt what actually transpired --
// for instance, a promotion failure could have led to
// a whole heap collection.
- complete = complete || (max_level_collected == 1 /* old */);
+ complete = complete || collected_old;
if (complete) { // We did a "major" collection
// FIXME: See comment at pre_full_gc_dump call
@@ -533,7 +536,7 @@
}
// Adjust generation sizes.
- if (max_level_collected == 1 /* old */) {
+ if (collected_old) {
_old_gen->compute_new_size();
}
_young_gen->compute_new_size();
@@ -661,11 +664,10 @@
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}
-
}
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
- int level,
+ GenerationType type,
bool younger_gens_as_roots,
ScanningOption so,
bool only_strong_roots,
@@ -675,7 +677,7 @@
const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
bool is_moving_collection = false;
- if (level == 0 || is_adjust_phase) {
+ if (type == YoungGen || is_adjust_phase) {
// young collections are always moving
is_moving_collection = true;
}
@@ -691,7 +693,7 @@
if (younger_gens_as_roots) {
if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
- if (level == 1) {
+ if (type == OldGen) {
not_older_gens->set_generation(_young_gen);
_young_gen->oop_iterate(not_older_gens);
}
@@ -699,8 +701,8 @@
}
}
// When collection is parallel, all threads get to cooperate to do
- // older-gen scanning.
- if (level == 0) {
+ // old generation scanning.
+ if (type == YoungGen) {
older_gens->set_generation(_old_gen);
rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
older_gens->reset_generation();
@@ -724,10 +726,10 @@
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
void GenCollectedHeap:: \
-oop_since_save_marks_iterate(int level, \
+oop_since_save_marks_iterate(GenerationType gen, \
OopClosureType* cur, \
OopClosureType* older) { \
- if (level == 0) { \
+ if (gen == YoungGen) { \
_young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
_old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
} else { \
@@ -739,8 +741,8 @@
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
-bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
- if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
+bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
+ if (include_young && !_young_gen->no_allocs_since_save_marks()) {
return false;
}
return _old_gen->no_allocs_since_save_marks();
@@ -770,47 +772,47 @@
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API
- collect(cause, 0 /* young */);
+ collect(cause, YoungGen);
} else {
#ifdef ASSERT
if (cause == GCCause::_scavenge_alot) {
// minor collection only
- collect(cause, 0 /* young */);
+ collect(cause, YoungGen);
} else {
// Stop-the-world full collection
- collect(cause, 1 /* old */);
+ collect(cause, OldGen);
}
#else
// Stop-the-world full collection
- collect(cause, 1 /* old */);
+ collect(cause, OldGen);
#endif
}
}
-void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
+void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
// The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
MutexLocker ml(Heap_lock);
- collect_locked(cause, max_level);
+ collect_locked(cause, max_generation);
}
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
// The caller has the Heap_lock
assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
- collect_locked(cause, 1 /* old */);
+ collect_locked(cause, OldGen);
}
// this is the private collection interface
// The Heap_lock is expected to be held on entry.
-void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
+void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
// Read the GC count while holding the Heap_lock
unsigned int gc_count_before = total_collections();
unsigned int full_gc_count_before = total_full_collections();
{
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_GenCollectFull op(gc_count_before, full_gc_count_before,
- cause, max_level);
+ cause, max_generation);
VMThread::execute(&op);
}
}
@@ -853,39 +855,39 @@
#endif // INCLUDE_ALL_GCS
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
- do_full_collection(clear_all_soft_refs, 1 /* old */);
+ do_full_collection(clear_all_soft_refs, OldGen);
}
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
- int max_level) {
- int local_max_level;
+ GenerationType last_generation) {
+ GenerationType local_last_generation;
if (!incremental_collection_will_fail(false /* don't consult_young */) &&
gc_cause() == GCCause::_gc_locker) {
- local_max_level = 0;
+ local_last_generation = YoungGen;
} else {
- local_max_level = max_level;
+ local_last_generation = last_generation;
}
- do_collection(true /* full */,
- clear_all_soft_refs /* clear_all_soft_refs */,
- 0 /* size */,
- false /* is_tlab */,
- local_max_level /* max_level */);
+ do_collection(true, // full
+ clear_all_soft_refs, // clear_all_soft_refs
+ 0, // size
+ false, // is_tlab
+ local_last_generation); // last_generation
// Hack XXX FIX ME !!!
// A scavenge may not have been attempted, or may have
// been attempted and failed, because the old gen was too full
- if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
+ if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
incremental_collection_will_fail(false /* don't consult_young */)) {
if (PrintGCDetails) {
gclog_or_tty->print_cr("GC locker: Trying a full collection "
"because scavenge failed");
}
// This time allow the old gen to be collected as well
- do_collection(true /* full */,
- clear_all_soft_refs /* clear_all_soft_refs */,
- 0 /* size */,
- false /* is_tlab */,
- 1 /* old */ /* max_level */);
+ do_collection(true, // full
+ clear_all_soft_refs, // clear_all_soft_refs
+ 0, // size
+ false, // is_tlab
+ OldGen); // last_generation
}
}
@@ -1108,12 +1110,8 @@
_young_gen->prepare_for_compaction(&cp);
}
-GCStats* GenCollectedHeap::gc_stats(int level) const {
- if (level == 0) {
- return _young_gen->gc_stats();
- } else {
- return _old_gen->gc_stats();
- }
+GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
+ return gen->gc_stats();
}
void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
@@ -1283,7 +1281,7 @@
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
oop obj,
size_t obj_size) {
- guarantee(old_gen->level() == 1, "We only get here with an old generation");
+ guarantee(old_gen == _old_gen, "We only get here with an old generation");
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL;
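
Taken together, these hunks replace numeric generation levels with pointer identity plus a two-value enum. A sketch of the resulting shape, using simplified stand-ins rather than the HotSpot classes:

    class Generation { /* spaces, stats, ... */ };

    class TwoGenHeap {
     public:
      enum GenerationType { YoungGen, OldGen };

      bool is_young_gen(const Generation* g) const { return g == _young_gen; }
      bool is_old_gen(const Generation* g) const  { return g == _old_gen; }

      // Collect generations up to and including max_generation.
      void collect(GenerationType max_generation) {
        collect_young();
        if (max_generation == OldGen) {
          collect_old();
        }
      }

     private:
      Generation* _young_gen;
      Generation* _old_gen;
      void collect_young() { /* scavenge */ }
      void collect_old()   { /* major collection */ }
    };
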
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -55,6 +55,11 @@
public:
friend class VM_PopulateDumpSharedSpace;
+ enum GenerationType {
+ YoungGen,
+ OldGen
+ };
+
private:
Generation* _young_gen;
Generation* _old_gen;
@@ -95,11 +100,11 @@
// Helper function for two callbacks below.
- // Considers collection of the first max_level+1 generations.
+ // Considers collection of generations up to and including max_generation.
- void do_collection(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab,
- int max_level);
+ void do_collection(bool full,
+ bool clear_all_soft_refs,
+ size_t size,
+ bool is_tlab,
+ GenerationType max_generation);
// Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an
@@ -110,7 +115,7 @@
// Callback from VM_GenCollectFull operation.
- // Perform a full collection of the first max_level+1 generations.
+ // Perform a full collection of generations up to and including max_generation.
virtual void do_full_collection(bool clear_all_soft_refs);
- void do_full_collection(bool clear_all_soft_refs, int max_level);
+ void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
@@ -121,7 +126,7 @@
FlexibleWorkGang* workers() const { return _workers; }
- GCStats* gc_stats(int level) const;
+ GCStats* gc_stats(Generation* generation) const;
// Returns JNI_OK on success
virtual jint initialize();
@@ -142,6 +147,9 @@
Generation* young_gen() const { return _young_gen; }
Generation* old_gen() const { return _old_gen; }
+ bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
+ bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
+
// The generational collector policy.
GenCollectorPolicy* gen_policy() const { return _gen_policy; }
@@ -160,8 +168,8 @@
size_t capacity() const;
size_t used() const;
- // Save the "used_region" for generations level and lower.
- void save_used_regions(int level);
+ // Save the "used_region" for both generations.
+ void save_used_regions();
size_t max_capacity() const;
@@ -182,9 +190,9 @@
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
- // Perform a full collection of the first max_level+1 generations.
+ // Perform a full collection of generations up to and including max_generation.
// Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
- void collect(GCCause::Cause cause, int max_level);
+ void collect(GCCause::Cause cause, GenerationType max_generation);
// Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in(), is_in_closed_subset() and is_in_youngest() may
@@ -314,10 +322,8 @@
}
// Update the gc statistics for each generation.
- // "level" is the level of the latest collection.
- void update_gc_stats(int current_level, bool full) {
- _young_gen->update_gc_stats(current_level, full);
- _old_gen->update_gc_stats(current_level, full);
+ void update_gc_stats(Generation* current_generation, bool full) {
+ _old_gen->update_gc_stats(current_generation, full);
}
bool no_gc_in_progress() { return !is_gc_active(); }
@@ -365,8 +371,8 @@
static GenCollectedHeap* heap();
// Invoke the "do_oop" method of one of the closures "not_older_gens"
- // or "older_gens" on root locations for the generation at
- // "level". (The "older_gens" closure is used for scanning references
+ // or "older_gens" on root locations for the generations depending on
+ // the type. (The "older_gens" closure is used for scanning references
// from older generations; "not_older_gens" is used everywhere else.)
// If "younger_gens_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
@@ -396,7 +402,7 @@
static const bool StrongRootsOnly = true;
void gen_process_roots(StrongRootsScope* scope,
- int level,
+ GenerationType type,
bool younger_gens_as_roots,
ScanningOption so,
bool only_strong_roots,
@@ -420,7 +426,7 @@
- // applied to references in the generation at "level", and the "older"
+ // applied to references in the generation given by "start_gen", and the "older"
// closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
- void oop_since_save_marks_iterate(int level, \
+ void oop_since_save_marks_iterate(GenerationType start_gen, \
OopClosureType* cur, \
OopClosureType* older);
@@ -428,21 +434,17 @@
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
- // Returns "true" iff no allocations have occurred in any generation at
- // "level" or above since the last
+ // Returns "true" iff no allocations have occurred since the last
// call to "save_marks".
- bool no_allocs_since_save_marks(int level);
+ bool no_allocs_since_save_marks(bool include_young);
// Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) {
- // Assumes a 2-generation system; the first disjunct remembers if an
- // incremental collection failed, even when we thought (second disjunct)
- // that it would not.
- assert(heap()->collector_policy()->is_generation_policy(),
- "the following definition may not be suitable for an n(>2)-generation system");
+ // The first disjunct remembers if an incremental collection failed, even
+ // when we thought (second disjunct) that it would not.
return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe());
}
@@ -482,10 +484,10 @@
// iterating over spaces.
void prepare_for_compaction();
- // Perform a full collection of the first max_level+1 generations.
+ // Perform a full collection of the generations up to and including max_generation.
// This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
- void collect_locked(GCCause::Cause cause, int max_level);
+ void collect_locked(GCCause::Cause cause, GenerationType max_generation);
// Returns success or failure.
bool create_cms_collector();
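
The GC-locker path in do_full_collection() above first narrows the request to the young generation and widens it only if the scavenge cannot succeed. A sketch of that retry shape, reusing the TwoGenHeap stand-in from the earlier sketch; caused_by_gc_locker and scavenge_would_fail are illustrative inputs, which in HotSpot come from gc_cause() and incremental_collection_will_fail():

    void full_collect(TwoGenHeap* heap, bool caused_by_gc_locker,
                      bool scavenge_would_fail) {
      TwoGenHeap::GenerationType last =
          caused_by_gc_locker ? TwoGenHeap::YoungGen : TwoGenHeap::OldGen;
      heap->collect(last);
      if (last == TwoGenHeap::YoungGen && scavenge_would_fail) {
        heap->collect(TwoGenHeap::OldGen);  // this time allow the old gen too
      }
    }
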
--- a/hotspot/src/share/vm/gc/shared/generation.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/generation.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -42,8 +42,7 @@
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
-Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
- _level(level),
+Generation::Generation(ReservedSpace rs, size_t initial_size) :
_ref_processor(NULL) {
if (!_virtual_space.initialize(rs, initial_size)) {
vm_exit_during_initialization("Could not reserve enough space for "
@@ -61,8 +60,10 @@
GenerationSpec* Generation::spec() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- assert(level() == 0 || level() == 1, "Bad gen level");
- return level() == 0 ? gch->gen_policy()->young_gen_spec() : gch->gen_policy()->old_gen_spec();
+ if (gch->is_young_gen(this)) {
+ return gch->gen_policy()->young_gen_spec();
+ }
+ return gch->gen_policy()->old_gen_spec();
}
size_t Generation::max_capacity() const {
@@ -111,9 +112,17 @@
void Generation::print_summary_info_on(outputStream* st) {
StatRecord* sr = stat_record();
double time = sr->accumulated_time.seconds();
+ // Keep the historical 0/1 numbering in this log line; it could instead
+ // print young/old now that the level concept is gone.
+ uint level;
+ if (GenCollectedHeap::heap()->is_young_gen(this)) {
+ level = 0;
+ } else {
+ level = 1;
+ }
st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
- "%d GC's, avg GC time %3.7f]",
- level(), time, sr->invocations,
+ "%u GC's, avg GC time %3.7f]",
+ level, time, sr->invocations,
sr->invocations > 0 ? time / sr->invocations : 0.0);
}
@@ -149,25 +158,14 @@
return blk.sp != NULL;
}
-Generation* Generation::next_gen() const {
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- if (level() == 0) {
- return gch->old_gen();
- } else {
- return NULL;
- }
-}
-
size_t Generation::max_contiguous_available() const {
// The largest number of contiguous free words in this or any higher generation.
- size_t max = 0;
- for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
- size_t avail = gen->contiguous_available();
- if (avail > max) {
- max = avail;
- }
+ size_t avail = contiguous_available();
+ size_t old_avail = 0;
+ if (GenCollectedHeap::heap()->is_young_gen(this)) {
+ old_avail = GenCollectedHeap::heap()->old_gen()->contiguous_available();
}
- return max;
+ return MAX2(avail, old_avail);
}
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
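
With exactly two generations, the removed next_gen() walk in max_contiguous_available() reduces to a maximum of at most two values, which is what the MAX2 form expresses. A standalone sketch:

    #include <algorithm>
    #include <cstddef>

    // Largest contiguous free block in this generation or any generation
    // above it; only the young generation has a generation above it.
    size_t max_contiguous_available(size_t own_avail, bool is_young,
                                    size_t old_avail) {
      return is_young ? std::max(own_avail, old_avail) : own_avail;
    }
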
--- a/hotspot/src/share/vm/gc/shared/generation.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/generation.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -98,9 +98,6 @@
// Memory area reserved for generation
VirtualSpace _virtual_space;
- // Level in the generation hierarchy.
- int _level;
-
// ("Weak") Reference processing support
ReferenceProcessor* _ref_processor;
@@ -110,12 +107,8 @@
// Statistics for garbage collection
GCStats* _gc_stats;
- // Returns the next generation in the configuration, or else NULL if this
- // is the highest generation.
- Generation* next_gen() const;
-
// Initialize the generation.
- Generation(ReservedSpace rs, size_t initial_byte_size, int level);
+ Generation(ReservedSpace rs, size_t initial_byte_size);
// Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
// "sp" that point into younger generations.
@@ -409,15 +402,14 @@
_time_of_last_gc = now;
}
- // Generations may keep statistics about collection. This
- // method updates those statistics. current_level is
- // the level of the collection that has most recently
- // occurred. This allows the generation to decide what
- // statistics are valid to collect. For example, the
- // generation can decide to gather the amount of promoted data
- // if the collection of the younger generations has completed.
+ // Generations may keep statistics about collection. This method
+ // updates those statistics. current_generation is the generation
+ // that was most recently collected. This allows the generation to
+ // decide what statistics are valid to collect. For example, the
+ // generation can decide to gather the amount of promoted data if
+ // the collection of the younger generations has completed.
GCStats* gc_stats() const { return _gc_stats; }
- virtual void update_gc_stats(int current_level, bool full) {}
+ virtual void update_gc_stats(Generation* current_generation, bool full) {}
// Mark sweep support phase2
virtual void prepare_for_compaction(CompactPoint* cp);
@@ -502,8 +494,6 @@
virtual const char* name() const = 0;
virtual const char* short_name() const = 0;
- int level() const { return _level; }
-
// Reference Processing accessor
ReferenceProcessor* const ref_processor() { return _ref_processor; }
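
The update_gc_stats() signature now passes the most recently collected generation itself, so a generation can compare identities to decide what to sample. A hedged sketch of one plausible use; the real policies live in the concrete generations (e.g. CMS):

    class StatsGeneration {
     public:
      void update_gc_stats(StatsGeneration* current_generation, bool full) {
        if (!full && current_generation != this) {
          // A younger generation just finished collecting: promoted-bytes
          // figures are meaningful to sample at this point.
          sample_promoted();
        }
      }
     private:
      void sample_promoted() { /* record promotion volume */ }
    };
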
--- a/hotspot/src/share/vm/gc/shared/generationSpec.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -36,18 +36,17 @@
#include "gc/cms/parNewGeneration.hpp"
#endif // INCLUDE_ALL_GCS
-Generation* GenerationSpec::init(ReservedSpace rs, int level,
- GenRemSet* remset) {
+Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {
switch (name()) {
case Generation::DefNew:
- return new DefNewGeneration(rs, init_size(), level);
+ return new DefNewGeneration(rs, init_size());
case Generation::MarkSweepCompact:
- return new TenuredGeneration(rs, init_size(), level, remset);
+ return new TenuredGeneration(rs, init_size(), remset);
#if INCLUDE_ALL_GCS
case Generation::ParNew:
- return new ParNewGeneration(rs, init_size(), level);
+ return new ParNewGeneration(rs, init_size());
case Generation::ConcurrentMarkSweep: {
assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
@@ -61,7 +60,7 @@
ConcurrentMarkSweepGeneration* g = NULL;
g = new ConcurrentMarkSweepGeneration(rs,
- init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
+ init_size(), ctrs, UseCMSAdaptiveFreeLists,
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
g->initialize_performance_counters();
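
GenerationSpec::init() is a small factory: the concrete generation is chosen purely from the spec's name, and with this change placement is implied by which slot the caller stores the result in, not by a level argument. A simplified sketch with hypothetical stand-in types:

    struct SpecGeneration { virtual ~SpecGeneration() { } };
    struct DefNewLike  : SpecGeneration { };
    struct TenuredLike : SpecGeneration { };

    enum GenName { DefNew, MarkSweepCompact };

    SpecGeneration* make_generation(GenName name) {
      switch (name) {
        case DefNew:           return new DefNewLike();
        case MarkSweepCompact: return new TenuredLike();
      }
      return 0;  // unreachable for a valid name
    }
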
--- a/hotspot/src/share/vm/gc/shared/generationSpec.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -45,7 +45,7 @@
_max_size(align_size_up(max_size, alignment))
{ }
- Generation* init(ReservedSpace rs, int level, GenRemSet* remset);
+ Generation* init(ReservedSpace rs, GenRemSet* remset);
// Accessors
Generation::Name name() const { return _name; }
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -184,7 +184,7 @@
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, _gc_cause);
- gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
+ gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}
// Returns true iff concurrent GCs unloads metadata.
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP
#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
#include "memory/heapInspection.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.hpp"
@@ -193,14 +194,14 @@
// GenCollectedHeap heap.
class VM_GenCollectFull: public VM_GC_Operation {
private:
- int _max_level;
+ GenCollectedHeap::GenerationType _max_generation;
public:
VM_GenCollectFull(uint gc_count_before,
uint full_gc_count_before,
GCCause::Cause gc_cause,
- int max_level)
+ GenCollectedHeap::GenerationType max_generation)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
- _max_level(max_level) { }
+ _max_generation(max_generation) { }
~VM_GenCollectFull() {}
virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
virtual void doit();
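
VM_GenCollectFull now carries the strongly typed generation request through the safepoint machinery. A sketch of the shape, again reusing the TwoGenHeap stand-in:

    class VMOpFullCollect {
      TwoGenHeap::GenerationType _max_generation;
     public:
      explicit VMOpFullCollect(TwoGenHeap::GenerationType max_generation)
          : _max_generation(max_generation) { }
      // In HotSpot this runs at a safepoint via VMThread::execute().
      void doit(TwoGenHeap* heap) { heap->collect(_max_generation); }
    };
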
--- a/hotspot/src/share/vm/memory/filemap.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/memory/filemap.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -649,8 +649,8 @@
return base;
}
-MemRegion *string_ranges = NULL;
-int num_ranges = 0;
+static MemRegion *string_ranges = NULL;
+static int num_ranges = 0;
bool FileMapInfo::map_string_regions() {
#if INCLUDE_ALL_GCS
if (UseG1GC && UseCompressedOops && UseCompressedClassPointers) {
@@ -749,7 +749,10 @@
}
void FileMapInfo::fixup_string_regions() {
- if (string_ranges != NULL) {
+ // If any string regions were found, call the fill routine to make them parseable.
+ // Note that string_ranges may be non-NULL even if no ranges were found.
+ if (num_ranges != 0) {
+ assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
}
}
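
The fixup guard above changes from a pointer test to a count test because an allocated ranges array can legitimately contain zero entries. A minimal sketch of the distinction:

    struct RangeTable {
      void* ranges;   // may be non-null even when nothing was found
      int   count;    // number of entries actually recorded
    };

    bool needs_fixup(const RangeTable& t) {
      return t.count != 0;   // gate on the count, not on the pointer
    }
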
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -381,6 +381,9 @@
if (!constants()->is_shared()) {
MetadataFactory::free_metadata(loader_data, constants());
}
+ // Delete any cached resolution errors for the constant pool
+ SystemDictionary::delete_resolution_error(constants());
+
set_constants(NULL);
}
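
The hunk above ties the lifetime of cached resolution errors to the constant pool they describe: when the pool is deallocated, its cache entries go too (the matching removal from jvmtiRedefineClasses.cpp appears below). A loose sketch using a std::map where HotSpot uses its own hashtable keyed on pool and index:

    #include <map>
    #include <string>

    typedef const void* PoolKey;              // stand-in for ConstantPool*
    static std::map<PoolKey, std::string> resolution_errors;

    void delete_resolution_errors_for(PoolKey pool) {
      resolution_errors.erase(pool);          // drop entries for this pool
    }
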
--- a/hotspot/src/share/vm/opto/graphKit.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -3803,6 +3803,11 @@
Node* zero = __ ConI(0); // Dirty card value
BasicType bt = T_BYTE;
+ if (UseConcMarkSweepGC && UseCondCardMark) {
+ insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
+ __ sync_kit(this);
+ }
+
if (UseCondCardMark) {
// The classic GC reference write barrier is typically implemented
// as a store into the global card mark table. Unfortunately
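
UseCondCardMark loads the card before dirtying it, and with a concurrent collector that load must not be reordered ahead of the preceding reference store, which is why the hunk inserts a StoreLoad barrier first. A portable sketch of the shape, with std::atomic standing in for the compiler IR and an illustrative card value:

    #include <atomic>

    typedef std::atomic<unsigned char> Card;
    const unsigned char kDirty = 0;

    void conditional_card_mark(Card* card) {
      // StoreLoad: keep the earlier reference store ordered before the load.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (card->load(std::memory_order_relaxed) != kDirty) {
        card->store(kDirty, std::memory_order_relaxed);
      }
    }
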
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -4089,9 +4089,6 @@
mnt->adjust_method_entries(the_class(), &trace_name_printed);
}
- // Fix Resolution Error table also to remove old constant pools
- SystemDictionary::delete_resolution_error(old_constants);
-
if (the_class->oop_map_cache() != NULL) {
// Flush references to any obsolete methods from the oop map cache
// so that obsolete methods are not pinned.
--- a/hotspot/src/share/vm/runtime/arguments.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -1389,6 +1389,12 @@
if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
}
+
+ if (!ClassUnloading) {
+ FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
+ FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
+ }
+
if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
--- a/hotspot/src/share/vm/runtime/os.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/os.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -832,7 +832,7 @@
}
}
-void os::print_cpu_info(outputStream* st) {
+void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// cpu
st->print("CPU:");
st->print("total %d", os::processor_count());
@@ -840,7 +840,7 @@
// st->print("(active %d)", os::active_processor_count());
st->print(" %s", VM_Version::cpu_features());
st->cr();
- pd_print_cpu_info(st);
+ pd_print_cpu_info(st, buf, buflen);
}
void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
--- a/hotspot/src/share/vm/runtime/os.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/os.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -588,8 +588,8 @@
// Output format may be different on different platforms.
static void print_os_info(outputStream* st);
static void print_os_info_brief(outputStream* st);
- static void print_cpu_info(outputStream* st);
- static void pd_print_cpu_info(outputStream* st);
+ static void print_cpu_info(outputStream* st, char* buf, size_t buflen);
+ static void pd_print_cpu_info(outputStream* st, char* buf, size_t buflen);
static void print_memory_info(outputStream* st);
static void print_dll_info(outputStream* st);
static void print_environment_variables(outputStream* st, const char** env_list);
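
Threading a caller-owned scratch buffer through print_cpu_info() lets platform code avoid allocating large buffers of its own, which matters when printing from the error handler on a possibly shallow stack. A sketch with hypothetical stand-in functions:

    #include <cstddef>
    #include <cstdio>

    void pd_print_cpu_info(char* buf, size_t buflen) {
      snprintf(buf, buflen, "platform-specific CPU details");
      // ... platform layer formats into the same caller-owned buffer ...
    }

    void print_cpu_info(char* buf, size_t buflen) {
      snprintf(buf, buflen, "total %d", 8 /* processor_count() stand-in */);
      pd_print_cpu_info(buf, buflen);  // reuse, rather than reallocate, buf
    }
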
--- a/hotspot/src/share/vm/runtime/thread.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -3274,6 +3274,9 @@
jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
extern void JDK_Version_init();
+ // Preinitialize version info.
+ VM_Version::early_initialize();
+
// Check version
if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -545,7 +545,6 @@
\
nonstatic_field(Generation, _reserved, MemRegion) \
nonstatic_field(Generation, _virtual_space, VirtualSpace) \
- nonstatic_field(Generation, _level, int) \
nonstatic_field(Generation, _stat_record, Generation::StatRecord) \
\
nonstatic_field(Generation::StatRecord, invocations, int) \
--- a/hotspot/src/share/vm/runtime/vm_version.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -280,7 +280,8 @@
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
- os::print_cpu_info(tty);
+ char buf[512];
+ os::print_cpu_info(tty, buf, sizeof(buf));
}
#endif
}
--- a/hotspot/src/share/vm/runtime/vm_version.hpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/runtime/vm_version.hpp Fri Jun 19 13:03:58 2015 +0000
@@ -58,6 +58,13 @@
public:
static void initialize();
+ // This allows for early initialization of VM_Version information
+ // that may be needed later in the initialization sequence but before
+ // full VM_Version initialization is possible. It can not depend on any
+ // other part of the VM being initialized when called. Platforms that
+ // need to specialize this define VM_Version::early_initialize().
+ static void early_initialize() { }
+
// Name
static const char* vm_name();
// Vendor
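
early_initialize() is a hook: a static no-op in the shared class that a platform's VM_Version can shadow with its own definition. A sketch of the name-hiding pattern:

    class AbstractVersion {
     public:
      static void early_initialize() { }  // safe before any VM state exists
      static void initialize();           // full initialization, runs later
    };

    class PlatformVersion : public AbstractVersion {
     public:
      static void early_initialize() {    // hides the shared no-op
        // e.g. probe CPU features needed by very early startup code
      }
    };
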
--- a/hotspot/src/share/vm/services/memoryService.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryService.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -127,7 +127,6 @@
assert(policy->is_generation_policy(), "Only support two generations");
GenCollectorPolicy* gen_policy = policy->as_generation_policy();
- guarantee(gen_policy->number_of_generations() == 2, "Only support two-generation heap");
if (gen_policy != NULL) {
Generation::Name kind = gen_policy->young_gen_spec()->name();
switch (kind) {
--- a/hotspot/src/share/vm/utilities/vmError.cpp Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/src/share/vm/utilities/vmError.cpp Fri Jun 19 13:03:58 2015 +0000
@@ -816,7 +816,7 @@
STEP(250, "(printing CPU info)" )
if (_verbose) {
- os::print_cpu_info(st);
+ os::print_cpu_info(st, buf, sizeof(buf));
st->cr();
}
--- a/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java Fri Jun 19 13:03:58 2015 +0000
@@ -35,42 +35,42 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 9 EDEN
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 47 EDEN
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 9 EDEN
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 87 EDEN
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 9 EDEN
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
- * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ * -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB -XX:-ResizePLAB
* -XX:OldSize=128m -XX:MaxHeapSize=192m
* -XX:-ExplicitGCInvokesConcurrent
* TestAllocationInEden 10m 147 EDEN
--- a/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java Fri Jun 19 13:03:58 2015 +0000
@@ -36,42 +36,42 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
- * -XX:-ExplicitGCInvokesConcurrent
+ * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
* TestPromotionFromEdenToTenured 10m 9 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
- * -XX:-ExplicitGCInvokesConcurrent
+ * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
* TestPromotionFromEdenToTenured 10m 47 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32m -XX:MaxHeapSize=96m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
* TestPromotionFromEdenToTenured 10m 9 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32m -XX:MaxHeapSize=128m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
* TestPromotionFromEdenToTenured 10m 87 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32M -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
- * -XX:-ExplicitGCInvokesConcurrent
+ * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
* TestPromotionFromEdenToTenured 10m 9 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
* -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
- * -XX:-ExplicitGCInvokesConcurrent
+ * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
* TestPromotionFromEdenToTenured 10m 147 TENURED
--- a/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java Fri Jun 19 13:03:58 2015 +0000
@@ -36,13 +36,13 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:OldSize=32m -XX:MaxHeapSize=160m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
- * -XX:OldSize=32m -XX:MaxHeapSize=160m
+ * -XX:OldSize=32m -XX:MaxHeapSize=160m -XX:-ResizePLAB
* -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
@@ -51,14 +51,14 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
* -XX:OldSize=32m -XX:MaxHeapSize=232m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:OldSize=32m -XX:MaxHeapSize=160m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
* TestPromotionFromSurvivorToTenuredAfterFullGC 20m 87
@@ -66,7 +66,7 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
* -XX:OldSize=32M -XX:MaxHeapSize=288m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9
@@ -74,7 +74,7 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:OldSize=32m -XX:MaxHeapSize=160m
- * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ * -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
* TestPromotionFromSurvivorToTenuredAfterFullGC 20m 147
--- a/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java Fri Jun 19 13:03:58 2015 +0000
@@ -36,7 +36,7 @@
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
- * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
@@ -44,7 +44,7 @@
* TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
- * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32
@@ -52,7 +52,7 @@
* TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
- * -XX:OldSize=32M -XX:MaxHeapSize=232m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=232m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
@@ -60,7 +60,7 @@
* TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
- * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64
@@ -68,7 +68,7 @@
* TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
- * -XX:OldSize=32M -XX:MaxHeapSize=288m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=288m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
@@ -76,7 +76,7 @@
* TENURED
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
- * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ * -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1 -XX:-ResizePLAB
* -XX:-ExplicitGCInvokesConcurrent
* -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128
--- a/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java Fri Jun 19 07:57:31 2015 +0300
+++ b/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java Fri Jun 19 13:03:58 2015 +0000
@@ -37,37 +37,37 @@
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
- * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 10m 9 SURVIVOR
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
- * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 20m 47 SURVIVOR
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
- * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 8m 9 SURVIVOR
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
- * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 20m 87 SURVIVOR
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
- * -XX:MaxHeapSize=384m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=384m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 10m 9 SURVIVOR
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
- * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ * -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB
* TestPromotionToSurvivor 20m 147 SURVIVOR
*/
public class TestPromotionToSurvivor {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/RedefineTests/RedefineRunningMethodsWithResolutionErrors.java Fri Jun 19 13:03:58 2015 +0000
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8076110
+ * @summary Redefine running methods that have cached resolution errors
+ * @library /testlibrary
+ * @modules java.instrument
+ * java.base/jdk.internal.org.objectweb.asm
+ * @build RedefineClassHelper
+ * @run main RedefineClassHelper
+ * @run main/othervm -javaagent:redefineagent.jar -XX:TraceRedefineClasses=0x600 RedefineRunningMethodsWithResolutionErrors
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Label;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+import java.lang.reflect.InvocationTargetException;
+
+public class RedefineRunningMethodsWithResolutionErrors extends ClassLoader implements Opcodes {
+
+ @Override
+ protected Class<?> findClass(String name) throws ClassNotFoundException {
+ if (name.equals("C")) {
+ byte[] b = loadC(false);
+ return defineClass(name, b, 0, b.length);
+ } else {
+ return super.findClass(name);
+ }
+ }
+
+ private static byte[] loadC(boolean redefine) {
+ ClassWriter cw = new ClassWriter(0);
+
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, "C", null, "java/lang/Object", null);
+ {
+ MethodVisitor mv;
+
+ mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "m", "()V", null, null);
+ mv.visitCode();
+
+ // First time we run we will:
+ // 1) Cache resolution errors
+ // 2) Redefine the class / method
+ // 3) Try to read the resolution errors that were cached
+ //
+ // The redefined method will never run, throw error to be sure
+ if (redefine) {
+ createThrowRuntimeExceptionCode(mv, "The redefined method was called");
+ } else {
+ createMethodBody(mv);
+ }
+ mv.visitMaxs(3, 0);
+ mv.visitEnd();
+ }
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+
+ private static void createMethodBody(MethodVisitor mv) {
+ Label classExists = new Label();
+
+ // Cache resolution errors
+ createLoadNonExistentClassCode(mv, classExists);
+
+ // Redefine our own class and method
+ mv.visitMethodInsn(INVOKESTATIC, "RedefineRunningMethodsWithResolutionErrors", "redefine", "()V");
+
+ // Provoke the same error again to make sure the resolution error cache works
+ createLoadNonExistentClassCode(mv, classExists);
+
+ // Test passed
+ mv.visitInsn(RETURN);
+
+ mv.visitFrame(F_SAME, 0, new Object[0], 0, new Object[0]);
+ mv.visitLabel(classExists);
+
+ createThrowRuntimeExceptionCode(mv, "Loaded class that shouldn't exist (\"NonExistentClass\")");
+ }
+
+ private static void createLoadNonExistentClassCode(MethodVisitor mv, Label classExists) {
+ Label tryLoadBegin = new Label();
+ Label tryLoadEnd = new Label();
+ Label catchLoadBlock = new Label();
+ mv.visitTryCatchBlock(tryLoadBegin, tryLoadEnd, catchLoadBlock, "java/lang/NoClassDefFoundError");
+
+ // Try to load a class that does not exist to provoke resolution errors
+ mv.visitLabel(tryLoadBegin);
+ mv.visitMethodInsn(INVOKESTATIC, "NonExistentClass", "nonExistentMethod", "()V");
+ mv.visitLabel(tryLoadEnd);
+
+ // No NoClassDefFoundError means NonExistentClass existed, which shouldn't happen
+ mv.visitJumpInsn(GOTO, classExists);
+
+ mv.visitFrame(F_SAME1, 0, new Object[0], 1, new Object[] { "java/lang/NoClassDefFoundError" });
+ mv.visitLabel(catchLoadBlock);
+
+ // Ignore the expected NoClassDefFoundError
+ mv.visitInsn(POP);
+ }
+
+ private static void createThrowRuntimeExceptionCode(MethodVisitor mv, String msg) {
+ mv.visitTypeInsn(NEW, "java/lang/RuntimeException");
+ mv.visitInsn(DUP);
+ mv.visitLdcInsn(msg);
+ mv.visitMethodInsn(INVOKESPECIAL, "java/lang/RuntimeException", "<init>", "(Ljava/lang/String;)V");
+ mv.visitInsn(ATHROW);
+ }
+
+ private static Class<?> c;
+
+ public static void redefine() throws Exception {
+ RedefineClassHelper.redefineClass(c, loadC(true));
+ }
+
+ public static void main(String[] args) throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
+ c = Class.forName("C", true, new RedefineRunningMethodsWithResolutionErrors());
+ c.getMethod("m").invoke(null);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/AnnotationTag.java Fri Jun 19 13:03:58 2015 +0000
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8042041
+ * @summary Fuzzy-ed RuntimeVisibleAnnotations causes assertion
+ * @compile badAnnotTag.jcod
+ * @run main AnnotationTag
+ */
+
+// Test that a bad element_tag in an element_value of a RuntimeVisibleAnnotations
+// attribute is ignored.
+public class AnnotationTag {
+ public static void main(String args[]) throws Throwable {
+
+ System.out.println("Regression test for bug 8042041");
+ try {
+ Class newClass = Class.forName("badAnnotTag");
+ } catch (java.lang.Throwable e) {
+ throw new RuntimeException(
+ "Unexpected exception: " + e.getMessage());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/badAnnotTag.jcod Fri Jun 19 13:03:58 2015 +0000
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// This class contains a bad element_tag in an element_value structure for a
+// RuntimeVisibleAnnotations attribute at line 91. The bad element tag should be ignored
+// by the class file parser.
+class badAnnotTag {
+ 0xCAFEBABE;
+ 0; // minor version
+ 52; // version
+ [19] { // Constant Pool
+ ; // first element is empty
+ class #16; // #1 at 0x0A
+ class #17; // #2 at 0x0D
+ class #18; // #3 at 0x10
+ Utf8 "value"; // #4 at 0x13
+ Utf8 "()Ljava/lang/String;"; // #5 at 0x1B
+ Utf8 "SourceFile"; // #6 at 0x32
+ Utf8 "badAnnotTag.java"; // #7 at 0x3F
+ Utf8 "RuntimeVisibleAnnotations"; // #8 at 0x50
+ Utf8 "Ljava/lang/annotation/Target;"; // #9 at 0x6C
+ Utf8 "Ljava/lang/annotation/ElementType;"; // #10 at 0x8C
+ Utf8 "TYPE_USE"; // #11 at 0xB1
+ Utf8 "TYPE_PARAMETER"; // #12 at 0xBC
+ Utf8 "Ljava/lang/annotation/Retention;"; // #13 at 0xCD
+ Utf8 "Ljava/lang/annotation/RetentionPolicy;"; // #14 at 0xF0
+ Utf8 "RUNTIME"; // #15 at 0x0119
+ Utf8 "badAnnotTag"; // #16 at 0x0123
+ Utf8 "java/lang/Object"; // #17 at 0x0127
+ Utf8 "java/lang/annotation/Annotation"; // #18 at 0x013A
+ } // Constant Pool
+
+ 0x2600; // access
+ #1;// this_cpx
+ #2;// super_cpx
+
+ [1] { // Interfaces
+ #3;
+ } // Interfaces
+
+ [0] { // fields
+ } // fields
+
+ [1] { // methods
+ { // Member at 0x016A
+ 0x0401; // access
+ #4; // name_cpx
+ #5; // sig_cpx
+ [0] { // Attributes
+ } // Attributes
+ } // Member
+ } // methods
+
+ [2] { // Attributes
+ Attr(#6, 2) { // SourceFile at 0x0174
+ #7;
+ } // end SourceFile
+ ;
+ Attr(#8, 32) { // RuntimeVisibleAnnotations at 0x017C
+ [2] { // annotations
+ { // annotation
+ #9;
+ [1] { // element_value_pairs
+ { // element value pair
+ #4;
+ { // element_value
+ '[';
+ [2] { // array_value
+ { // element_value
+ 'd'; // * illegal value *, correct value is 'e'
+ { // enum_const_value
+ #10;
+ #11;
+ } // enum_const_value
+ } // element_value
+ ;
+ { // element_value
+ 'e';
+ { // enum_const_value
+ #10;
+ #12;
+ } // enum_const_value
+ } // element_value
+ } // array_value
+ } // element_value
+ } // element value pair
+ } // element_value_pairs
+ } // annotation
+ ;
+ { // annotation
+ #13;
+ [1] { // element_value_pairs
+ { // element value pair
+ #4;
+ { // element_value
+ 'e';
+ { // enum_const_value
+ #14;
+ #15;
+ } // enum_const_value
+ } // element_value
+ } // element value pair
+ } // element_value_pairs
+ } // annotation
+ }
+ } // end RuntimeVisibleAnnotations
+ } // Attributes
+} // end class badAnnotTag