--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,39 @@
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class G1Allocator extends VMObject {
+
+ // size_t _summary_bytes_used;
+ static private CIntegerField summaryBytesUsedField;
+
+ static {
+ VM.registerVMInitializedObserver(new Observer() {
+ public void update(Observable o, Object data) {
+ initialize(VM.getVM().getTypeDataBase());
+ }
+ });
+ }
+
+ static private synchronized void initialize(TypeDataBase db) {
+ Type type = db.lookupType("G1Allocator");
+
+ summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+ }
+
+ public long getSummaryBytes() {
+ return summaryBytesUsedField.getValue(addr);
+ }
+
+ public G1Allocator(Address addr) {
+ super(addr);
+ }
+}
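
For the lookupType("G1Allocator") and getCIntegerField("_summary_bytes_used")
calls above to resolve, the VM side must export the new class and field through
its VMStructs tables. A minimal sketch of the matching entry, assuming the
usual vmStructs_g1.hpp macro conventions:

    nonstatic_field(G1Allocator, _summary_bytes_used, size_t)
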
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Fri Sep 26 06:07:48 2014 +0000
@@ -36,7 +36,6 @@
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
@@ -47,8 +46,8 @@
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
- // size_t _summary_bytes_used;
- static private CIntegerField summaryBytesUsedField;
+ // G1Allocator* _allocator;
+ static private AddressField g1Allocator;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// HeapRegionSet _old_set;
@@ -68,7 +67,7 @@
Type type = db.lookupType("G1CollectedHeap");
hrmFieldOffset = type.getField("_hrm").getOffset();
- summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+ g1Allocator = type.getAddressField("_allocator");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -79,7 +78,7 @@
}
public long used() {
- return summaryBytesUsedField.getValue(addr);
+ return allocator().getSummaryBytes();
}
public long n_regions() {
@@ -97,6 +96,11 @@
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}
+ public G1Allocator allocator() {
+ Address g1AllocatorAddr = g1Allocator.getValue(addr);
+ return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+ }
+
public HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
--- a/hotspot/make/bsd/makefiles/vm.make Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/make/bsd/makefiles/vm.make Fri Sep 26 06:07:48 2014 +0000
@@ -234,10 +234,10 @@
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
rm -f $@
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
- { system ("cat vm.def"); } \
+ { system ("cat mapfile_ext"); system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
@@ -249,6 +249,13 @@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+ rm -f $@
+ touch $@
+ if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+ cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+ fi
+
STATIC_CXX = false
ifeq ($(LINK_INTO),AOUT)
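
What the mapfile rules above accomplish: at the "INSERT VTABLE SYMBOLS HERE"
marker, the awk script now splices in the contents of an optional mapfile_ext
ahead of the generated vm.def entries, so an alternate make tree can export
additional symbols from libjvm without touching the shared mapfile. A
hypothetical mapfile-ext fragment (the symbol name is illustrative only; the
lines land inside the mapfile's global block, one exported symbol per line):

    JVM_SomeExtensionEntry;

The same pattern is repeated for the linux and solaris makefiles below.
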
@@ -265,6 +272,8 @@
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
+ else
+ LFLAGS_VM += -Wl,-z,defs
endif
# JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
--- a/hotspot/make/excludeSrc.make Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/make/excludeSrc.make Fri Sep 26 06:07:48 2014 +0000
@@ -21,6 +21,9 @@
# questions.
#
#
+
+include $(GAMMADIR)/make/altsrc.make
+
ifeq ($(INCLUDE_JVMTI), false)
CXXFLAGS += -DINCLUDE_JVMTI=0
CFLAGS += -DINCLUDE_JVMTI=0
@@ -78,12 +81,12 @@
CXXFLAGS += -DINCLUDE_ALL_GCS=0
CFLAGS += -DINCLUDE_ALL_GCS=0
- gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
- gc_exclude := \
- $(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp)) \
- $(notdir $(wildcard $(gc_impl)/g1/*.cpp)) \
- $(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp)) \
- $(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
+ gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
+ gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
+ gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
+ gc_exclude := $(foreach gc,$(gc_subdirs), \
+ $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \
+ $(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
Src_Files_EXCLUDE += $(gc_exclude)
# Exclude everything in $(gc_impl)/shared except the files listed
--- a/hotspot/make/linux/makefiles/vm.make Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/make/linux/makefiles/vm.make Fri Sep 26 06:07:48 2014 +0000
@@ -227,10 +227,10 @@
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
rm -f $@
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
- { system ("cat vm.def"); } \
+ { system ("cat mapfile_ext"); system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
@@ -242,6 +242,13 @@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+ rm -f $@
+ touch $@
+ if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+ cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+ fi
+
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
STATIC_CXX = false
else
@@ -261,6 +268,7 @@
LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
+ LFLAGS_VM += -Wl,-z,defs
# JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
# get around library dependency and compatibility issues. Must use gcc not
--- a/hotspot/make/solaris/makefiles/buildtree.make Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/make/solaris/makefiles/buildtree.make Fri Sep 26 06:07:48 2014 +0000
@@ -258,6 +258,8 @@
echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
[ -n "$(ZIPEXE)" ] && \
echo && echo "ZIPEXE = $(ZIPEXE)"; \
+ [ -n "$(HS_ALT_MAKE)" ] && \
+ echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
echo && \
echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/hotspot/make/solaris/makefiles/vm.make Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/make/solaris/makefiles/vm.make Fri Sep 26 06:07:48 2014 +0000
@@ -130,7 +130,7 @@
# Not sure what the 'designed for' comment is referring to above.
# The order may not be too significant anymore, but I have placed this
# older libm before libCrun, just to make sure it's found and used first.
-LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle
+LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle -lnsl
else
ifeq ($(COMPILER_REV_NUMERIC), 502)
# SC6.1 has its own libm.so: specifying anything else provokes a name conflict.
@@ -249,11 +249,12 @@
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
rm -f $@
cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
| $(NAWK) '{ \
if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") { \
+ system ("cat mapfile_ext"); \
system ("cat vm.def"); \
} else { \
print $$0; \
@@ -267,6 +268,13 @@
vm.def: $(Obj_Files)
sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+ rm -f $@
+ touch $@
+ if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+ cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+ fi
+
ifeq ($(LINK_INTO),AOUT)
LIBJVM.o =
LIBJVM_MAPFILE =
@@ -276,6 +284,7 @@
LIBJVM_MAPFILE$(LDNOMAP) = mapfile_extended
LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
+ LFLAGS_VM += -Wl,-z,defs
ifndef USE_GCC
LIBS_VM = $(LIBS)
else
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -3129,8 +3129,7 @@
return true;
}
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
- bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
fatal("os::reserve_memory_special should not be called on Solaris.");
return NULL;
}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -4167,7 +4167,7 @@
// been published), so we do not need to check for
// uninitialized objects before pushing here.
void Par_ConcMarkingClosure::do_oop(oop obj) {
- assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
@@ -7226,7 +7226,7 @@
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
// Ignore mark word because we are running concurrent with mutators
- assert(p->is_oop_or_null(true), "expected an oop or null");
+ assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
HeapWord* addr = (HeapWord*)p;
assert(_span.contains(addr), "we are scanning the CMS generation");
bool is_obj_array = false;
@@ -7666,7 +7666,7 @@
}
void PushAndMarkVerifyClosure::do_oop(oop obj) {
- assert(obj->is_oop_or_null(), "expected an oop or NULL");
+ assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
@@ -7764,7 +7764,7 @@
void PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
@@ -7802,7 +7802,7 @@
void Par_PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
@@ -7879,7 +7879,7 @@
// path and may be at the end of the global overflow list (so
// the mark word may be NULL).
assert(obj->is_oop_or_null(true /* ignore mark word */),
- "expected an oop or NULL");
+ err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
@@ -7959,7 +7959,7 @@
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
assert(obj->is_oop_or_null(true),
- "expected an oop or NULL");
+ err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -73,7 +73,7 @@
} else {
res = (PromotedObject*)(_next & next_mask);
}
- assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+ assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
return res;
}
inline void setNext(PromotedObject* x) {
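
All of the assertion changes in the two CMS files above follow a single
pattern: a fixed failure string is replaced by a formatted message that
carries the offending address. err_msg, PTR_FORMAT and p2i() are the existing
utilities/debug.hpp and globalDefinitions.hpp helpers; the pattern, shown
standalone:

    assert(p->is_oop_or_null(true /* ignore mark word */),
           err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
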
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -107,7 +107,7 @@
HeapRegion *curr = regions_at(index++);
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
- guarantee(!curr->isHumongous(), "should not be humongous!");
+ guarantee(!curr->is_humongous(), "should not be humongous!");
if (prev != NULL) {
guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@@ -149,7 +149,7 @@
void CollectionSetChooser::add_region(HeapRegion* hr) {
- assert(!hr->isHumongous(),
+ assert(!hr->is_humongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_young(), "should not be young!");
_regions.append(hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -109,7 +109,7 @@
bool should_add(HeapRegion* hr) {
assert(hr->is_marked(), "pre-condition");
assert(!hr->is_young(), "should never consider young regions");
- return !hr->isHumongous() &&
+ return !hr->is_humongous() &&
hr->live_bytes() < _region_live_threshold_bytes;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -910,7 +910,7 @@
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
r->note_start_of_marking();
}
return false;
@@ -1288,6 +1288,22 @@
print_stats();
}
+// Helper class to get rid of some boilerplate code.
+class G1CMTraceTime : public GCTraceTime {
+ static bool doit_and_prepend(bool doit) {
+ if (doit) {
+ gclog_or_tty->put(' ');
+ }
+ return doit;
+ }
+
+ public:
+ G1CMTraceTime(const char* title, bool doit)
+ : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+ G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+ }
+};
+
void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),
@@ -1341,9 +1357,13 @@
// marking due to overflowing the global mark stack.
reset_marking_state();
} else {
- // Aggregate the per-task counting data that we have accumulated
- // while marking.
- aggregate_count_data();
+ {
+ G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
+
+ // Aggregate the per-task counting data that we have accumulated
+ // while marking.
+ aggregate_count_data();
+ }
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
// We're done with marking.
@@ -1398,10 +1418,10 @@
// to 1 the bits on the region bitmap that correspond to its
// associated "continues humongous" regions.
void set_bit_for_region(HeapRegion* hr) {
- assert(!hr->continuesHumongous(), "should have filtered those out");
+ assert(!hr->is_continues_humongous(), "should have filtered those out");
BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
- if (!hr->startsHumongous()) {
+ if (!hr->is_starts_humongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_at_put(index, true);
} else {
@@ -1434,7 +1454,7 @@
bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
// We will ignore these here and process them when their
// associated "starts humongous" region is processed (see
// set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1556,7 +1576,7 @@
int failures() const { return _failures; }
bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
// We will ignore these here and process them when their
// associated "starts humongous" region is processed (see
// set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1731,7 +1751,7 @@
bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
// We will ignore these here and process them when their
// associated "starts humongous" region is processed (see
// set_bit_for_heap_region()). Note that we cannot rely on their
@@ -1861,7 +1881,7 @@
const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
bool doHeapRegion(HeapRegion *hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
return false;
}
// We use a claim value of zero here because all regions
@@ -1875,8 +1895,8 @@
if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
_freed_bytes += hr->used();
hr->set_containing_set(NULL);
- if (hr->isHumongous()) {
- assert(hr->startsHumongous(), "we should only see starts humongous");
+ if (hr->is_humongous()) {
+ assert(hr->is_starts_humongous(), "we should only see starts humongous");
_humongous_regions_removed.increment(1u, hr->capacity());
_g1->free_humongous_region(hr, _local_cleanup_list, true);
} else {
@@ -2466,22 +2486,6 @@
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}
-// Helper class to get rid of some boilerplate code.
-class G1RemarkGCTraceTime : public GCTraceTime {
- static bool doit_and_prepend(bool doit) {
- if (doit) {
- gclog_or_tty->put(' ');
- }
- return doit;
- }
-
- public:
- G1RemarkGCTraceTime(const char* title, bool doit)
- : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
- G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
- }
-};
-
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (has_overflown()) {
// Skip processing the discovered references if we have
@@ -2504,10 +2508,7 @@
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
- if (G1Log::finer()) {
- gclog_or_tty->put(' ');
- }
- GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
+ G1CMTraceTime t("GC ref-proc", G1Log::finer());
ReferenceProcessor* rp = g1h->ref_processor_cm();
@@ -2598,24 +2599,24 @@
// Unload Klasses, String, Symbols, Code Cache, etc.
{
- G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+ G1CMTraceTime trace("Unloading", G1Log::finer());
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
{
- G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+ G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
}
{
- G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+ G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
}
if (G1StringDedup::is_enabled()) {
- G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+ G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
G1StringDedup::unlink(&g1_is_alive);
}
}
@@ -2719,7 +2720,7 @@
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
+ G1CMTraceTime trace("Finalize Marking", G1Log::finer());
g1h->ensure_parsability(false);
@@ -3191,7 +3192,7 @@
_cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
// We will ignore these here and process them when their
// associated "starts humongous" region is processed.
// Note that we cannot rely on their associated
@@ -3334,6 +3335,7 @@
} else {
g1_par_agg_task.work(0);
}
+ _g1h->allocation_context_stats().update_at_remark();
}
// Clear the per-worker arrays used to store the per-region counting data
@@ -3562,7 +3564,7 @@
void CMTask::setup_for_region(HeapRegion* hr) {
assert(hr != NULL,
"claim_region() should have filtered out NULL regions");
- assert(!hr->continuesHumongous(),
+ assert(!hr->is_continues_humongous(),
"claim_region() should have filtered out continues humongous regions");
if (_cm->verbose_low()) {
@@ -4287,7 +4289,7 @@
HR_FORMAT_PARAMS(_curr_region));
}
- assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
+ assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
"humongous regions should go around loop once only");
// Some special cases:
@@ -4301,7 +4303,7 @@
if (mr.is_empty()) {
giveup_current_region();
regular_clock_call();
- } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
+ } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
if (_nextMarkBitMap->isMarked(mr.start())) {
// The object is marked - apply the closure
BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
@@ -4748,7 +4750,7 @@
size_t remset_bytes = r->rem_set()->mem_size();
size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
- if (r->startsHumongous()) {
+ if (r->is_starts_humongous()) {
assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
_hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
"they should have been zeroed after the last time we used them");
@@ -4760,7 +4762,7 @@
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
end = bottom + HeapRegion::GrainWords;
- } else if (r->continuesHumongous()) {
+ } else if (r->is_continues_humongous()) {
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
assert(end == bottom + HeapRegion::GrainWords, "invariant");
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -88,7 +88,7 @@
size_t region_size_bytes = mr.byte_size();
uint index = hr->hrm_index();
- assert(!hr->continuesHumongous(), "should not be HC region");
+ assert(!hr->is_continues_humongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity");
assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
assert(marked_bytes_array != NULL, "pre-condition");
@@ -277,7 +277,7 @@
++_refs_reached;
HeapWord* objAddr = (HeapWord*) obj;
- assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+ assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
if (_g1h->is_in_g1_reserved(objAddr)) {
assert(obj != NULL, "null check is implicit");
if (!_nextMarkBitMap->isMarked(objAddr)) {
@@ -366,7 +366,7 @@
assert(hr != NULL, "sanity");
// Given that we're looking for a region that contains an object
// header it's impossible to get back a HC region.
- assert(!hr->continuesHumongous(), "sanity");
+ assert(!hr->is_continues_humongous(), "sanity");
// We cannot assert that word_size == obj->size() given that obj
// might not be in a consistent state (another thread might be in
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -129,8 +129,7 @@
// Note that we first perform the allocation and then we store the
// region in _alloc_region. This is the reason why an active region
// can never be empty.
- _alloc_region = new_alloc_region;
- _count += 1;
+ update_alloc_region(new_alloc_region);
trace("region allocation successful");
return result;
} else {
@@ -172,6 +171,19 @@
trace("set");
}
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+ trace("update");
+ // We explicitly check that the region is not empty to make sure we
+ // maintain the "the alloc region cannot be empty" invariant.
+ assert(alloc_region != NULL && !alloc_region->is_empty(),
+ ar_ext_msg(this, "pre-condition"));
+
+ _alloc_region = alloc_region;
+ _alloc_region->set_allocation_context(allocation_context());
+ _count += 1;
+ trace("updated");
+}
+
HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
@@ -225,5 +237,70 @@
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
- _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
+ _alloc_region(NULL), _count(0), _used_bytes_before(0),
+ _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+ bool force) {
+ return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+ size_t allocated_bytes) {
+ _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+ bool force) {
+ assert(!force, "not supported for GC alloc regions");
+ return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+ size_t allocated_bytes) {
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+ GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+ bool force) {
+ assert(!force, "not supported for GC alloc regions");
+ return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+ size_t allocated_bytes) {
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+ GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+ HeapRegion* cur = get();
+ if (cur != NULL) {
+ // Determine how far we are from the next card boundary. If it is smaller than
+ // the minimum object size we can allocate into, expand into the next card.
+ HeapWord* top = cur->top();
+ HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+ size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+ if (to_allocate_words != 0) {
+ // We are not at a card boundary. Fill up, possibly into the next, taking the
+ // end of the region and the minimum object size into account.
+ to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+ MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+ // Skip allocation if there is not enough space to allocate even the smallest
+ // possible object. In this case this region will not be retained, so the
+ // original problem cannot occur.
+ if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+ HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+ CollectedHeap::fill_with_object(dummy, to_allocate_words);
+ }
+ }
+ }
+ return G1AllocRegion::release();
+}
+
+
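
A worked instance of the card-filling logic in OldGCAllocRegion::release()
above, assuming the usual 512-byte BOT cards (G1BlockOffsetSharedArray::N_bytes)
and 8-byte HeapWords:

    // top() sits 24 bytes past a card boundary; the region end is far away:
    //   aligned_top       = top() + 488 bytes   (align_ptr_up to the next card)
    //   to_allocate_words = 488 / 8 = 61 words
    // 61 >= G1CollectedHeap::min_fill_size(), so a 61-word dummy object is
    // allocated and formatted with fill_with_object(); top() is now card-aligned.

With the last card completely filled, remembered set scanning can update that
card's BOT entry without racing allocation threads that would otherwise still
be placing objects into it.
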
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -57,6 +57,9 @@
// correct use of init() and release()).
HeapRegion* volatile _alloc_region;
+ // Allocation context associated with this alloc region.
+ AllocationContext_t _allocation_context;
+
// It keeps track of the distinct number of regions that are used
// for allocation in the active interval of this object, i.e.,
// between a call to init() and a call to release(). The count
@@ -110,6 +113,10 @@
// else can allocate out of it.
void retire(bool fill_up);
+ // After a region is allocated by allocate_new_region(), this
+ // method is used to set it as the active alloc region.
+ void update_alloc_region(HeapRegion* alloc_region);
+
// Allocate a new active region and use it to perform a word_size
// allocation. The force parameter will be passed on to
// G1CollectedHeap::allocate_new_alloc_region() and tells it to try
@@ -137,6 +144,9 @@
return (hr == _dummy_region) ? NULL : hr;
}
+ void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+ AllocationContext_t allocation_context() { return _allocation_context; }
+
uint count() { return _count; }
// The following two are the building blocks for the allocation method.
@@ -182,6 +192,40 @@
#endif // G1_ALLOC_REGION_TRACING
};
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+ virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+ virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+ MutatorAllocRegion()
+ : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+ virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+ virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+ SurvivorGCAllocRegion()
+ : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+ virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+ virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+ OldGCAllocRegion()
+ : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+ // This specialization of release() makes sure that the last card that has
+ // been allocated into has been completely filled by a dummy object. This
+ // avoids races between remembered set scanning, which wants to update the
+ // BOT of the last card in the retained old GC alloc region, and allocation
+ // threads allocating into that card at the same time.
+ virtual HeapRegion* release();
+};
+
class ar_ext_msg : public err_msg {
public:
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
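
The three subclasses above differ only in how regions are obtained
(allocate_new_region) and given back (retire_region); the allocation fast path
stays in G1AllocRegion. A rough lifecycle sketch for the mutator case, assuming
G1AllocRegion::setup() has already installed the dummy region:

    MutatorAllocRegion mar;
    mar.init();                              // activate: install dummy region
    HeapWord* p = mar.attempt_allocation(word_size, false /* bot_updates */);
    // ... more allocations ...
    HeapRegion* last = mar.release();        // retire and deactivate
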
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+
+typedef unsigned char AllocationContext_t;
+
+class AllocationContext : AllStatic {
+public:
+ // Currently used context
+ static AllocationContext_t current() {
+ return 0;
+ }
+ // System wide default context
+ static AllocationContext_t system() {
+ return 0;
+ }
+};
+
+class AllocationContextStats: public StackObj {
+public:
+ inline void clear() { }
+ inline void update(bool full_gc) { }
+ inline void update_at_remark() { }
+ inline bool available() { return false; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
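
Both current() and system() return 0 for now, so AllocationContext_t is a
placeholder: every allocation path in this changeset compiles against the
context-passing interface while behaving exactly as before. A usage sketch
matching the call sites further down:

    AllocationContext_t context = AllocationContext::current();
    HeapWord* result =
      _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                    false /* bot_updates */);
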
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+void G1DefaultAllocator::init_mutator_alloc_region() {
+ assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+ _mutator_alloc_region.init();
+}
+
+void G1DefaultAllocator::release_mutator_alloc_region() {
+ _mutator_alloc_region.release();
+ assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
+void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
+ OldGCAllocRegion* old,
+ HeapRegion** retained_old) {
+ HeapRegion* retained_region = *retained_old;
+ *retained_old = NULL;
+
+ // We will discard the current GC alloc region if:
+ // a) it's in the collection set (it can happen!),
+ // b) it's already full (no point in using it),
+ // c) it's empty (this means that it was emptied during
+ // a cleanup and it should be on the free list now), or
+ // d) it's humongous (this means that it was emptied
+ // during a cleanup and was added to the free list, but
+ // has been subsequently used to allocate a humongous
+ // object that may be less than the region size).
+ if (retained_region != NULL &&
+ !retained_region->in_collection_set() &&
+ !(retained_region->top() == retained_region->end()) &&
+ !retained_region->is_empty() &&
+ !retained_region->is_humongous()) {
+ retained_region->record_top_and_timestamp();
+ // The retained region was added to the old region set when it was
+ // retired. We have to remove it now, since we don't allow regions
+ // we allocate to in the region sets. We'll re-add it later, when
+ // it's retired again.
+ _g1h->_old_set.remove(retained_region);
+ bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+ retained_region->note_start_of_copying(during_im);
+ old->set(retained_region);
+ _g1h->_hr_printer.reuse(retained_region);
+ evacuation_info.set_alloc_regions_used_before(retained_region->used());
+ }
+}
+
+void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+ assert_at_safepoint(true /* should_be_vm_thread */);
+
+ _survivor_gc_alloc_region.init();
+ _old_gc_alloc_region.init();
+ reuse_retained_old_region(evacuation_info,
+ &_old_gc_alloc_region,
+ &_retained_old_gc_alloc_region);
+}
+
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+ AllocationContext_t context = AllocationContext::current();
+ evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
+ old_gc_alloc_region(context)->count());
+ survivor_gc_alloc_region(context)->release();
+ // If we have an old GC alloc region to release, we'll save it in
+ // _retained_old_gc_alloc_region. If we don't, then
+ // _retained_old_gc_alloc_region will become NULL. This is what we
+ // want either way, so there is no reason to check explicitly for
+ // either condition.
+ _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+
+ if (ResizePLAB) {
+ _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+ _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+ }
+}
+
+void G1DefaultAllocator::abandon_gc_alloc_regions() {
+ assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ _retained_old_gc_alloc_region = NULL;
+}
+
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+ ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+
+HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+ HeapWord* obj = NULL;
+ size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+ if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+ G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+ add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+ alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+ HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+ if (buf == NULL) {
+ return NULL; // Let caller handle allocation failure.
+ }
+ // Otherwise, install the new buffer and allocate from it.
+ alloc_buf->set_word_size(gclab_word_size);
+ alloc_buf->set_buf(buf);
+
+ obj = alloc_buf->allocate(word_sz);
+ assert(obj != NULL, "buffer was definitely big enough...");
+ } else {
+ obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+ }
+ return obj;
+}
+
+G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
+ G1ParGCAllocator(g1h),
+ _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+ _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
+
+ _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+ _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
+
+}
+
+void G1DefaultParGCAllocator::retire_alloc_buffers() {
+ for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+ size_t waste = _alloc_buffers[ap]->words_remaining();
+ add_to_alloc_buffer_waste(waste);
+ _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+ true /* end_of_gc */,
+ false /* retain */);
+ }
+}
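
The threshold test in allocate_slow() is the usual PLAB waste policy: refill
the buffer only when the request is small relative to the configured waste
percentage. A worked instance, assuming the common ParallelGCBufferWastePct
default of 10 and a 4096-word PLAB:

    word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct
    word_sz * 100 < 4096 * 10   ->   word_sz <= 409 words

Requests of at most 409 words retire the current buffer (counting its leftover
words as waste) and refill it; anything larger goes straight to
par_allocate_during_gc(), so at most roughly ParallelGCBufferWastePct percent
of a PLAB is ever discarded.
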
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+
+enum GCAllocPurpose {
+ GCAllocForTenured,
+ GCAllocForSurvived,
+ GCAllocPurposeCount
+};
+
+// Base class for G1 allocators.
+class G1Allocator : public CHeapObj<mtGC> {
+ friend class VMStructs;
+protected:
+ G1CollectedHeap* _g1h;
+
+ // Outside of GC pauses, the number of bytes used in all regions other
+ // than the current allocation region.
+ size_t _summary_bytes_used;
+
+public:
+ G1Allocator(G1CollectedHeap* heap) :
+ _g1h(heap), _summary_bytes_used(0) { }
+
+ static G1Allocator* create_allocator(G1CollectedHeap* g1h);
+
+ virtual void init_mutator_alloc_region() = 0;
+ virtual void release_mutator_alloc_region() = 0;
+
+ virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+ virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+ virtual void abandon_gc_alloc_regions() = 0;
+
+ virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
+ virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
+ virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
+ virtual size_t used() = 0;
+ virtual bool is_retained_old_region(HeapRegion* hr) = 0;
+
+ void reuse_retained_old_region(EvacuationInfo& evacuation_info,
+ OldGCAllocRegion* old,
+ HeapRegion** retained);
+
+ size_t used_unlocked() const {
+ return _summary_bytes_used;
+ }
+
+ void increase_used(size_t bytes) {
+ _summary_bytes_used += bytes;
+ }
+
+ void decrease_used(size_t bytes) {
+ assert(_summary_bytes_used >= bytes,
+ err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+ _summary_bytes_used, bytes));
+ _summary_bytes_used -= bytes;
+ }
+
+ void set_used(size_t bytes) {
+ _summary_bytes_used = bytes;
+ }
+
+ virtual HeapRegion* new_heap_region(uint hrs_index,
+ G1BlockOffsetSharedArray* sharedOffsetArray,
+ MemRegion mr) {
+ return new HeapRegion(hrs_index, sharedOffsetArray, mr);
+ }
+};
+
+// The default allocator for G1.
+class G1DefaultAllocator : public G1Allocator {
+protected:
+ // Alloc region used to satisfy mutator allocation requests.
+ MutatorAllocRegion _mutator_alloc_region;
+
+ // Alloc region used to satisfy allocation requests by the GC for
+ // survivor objects.
+ SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+ // Alloc region used to satisfy allocation requests by the GC for
+ // old objects.
+ OldGCAllocRegion _old_gc_alloc_region;
+
+ HeapRegion* _retained_old_gc_alloc_region;
+public:
+ G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
+
+ virtual void init_mutator_alloc_region();
+ virtual void release_mutator_alloc_region();
+
+ virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
+ virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+ virtual void abandon_gc_alloc_regions();
+
+ virtual bool is_retained_old_region(HeapRegion* hr) {
+ return _retained_old_gc_alloc_region == hr;
+ }
+
+ virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
+ return &_mutator_alloc_region;
+ }
+
+ virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
+ return &_survivor_gc_alloc_region;
+ }
+
+ virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
+ return &_old_gc_alloc_region;
+ }
+
+ virtual size_t used() {
+ assert(Heap_lock->owner() != NULL,
+ "Should be owned on this thread's behalf.");
+ size_t result = _summary_bytes_used;
+
+ // Read only once in case it is set to NULL concurrently
+ HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+ if (hr != NULL) {
+ result += hr->used();
+ }
+ return result;
+ }
+};
+
+class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+private:
+ bool _retired;
+
+public:
+ G1ParGCAllocBuffer(size_t gclab_word_size);
+ virtual ~G1ParGCAllocBuffer() {
+ guarantee(_retired, "Allocation buffer has not been retired");
+ }
+
+ virtual void set_buf(HeapWord* buf) {
+ ParGCAllocBuffer::set_buf(buf);
+ _retired = false;
+ }
+
+ virtual void retire(bool end_of_gc, bool retain) {
+ if (_retired) {
+ return;
+ }
+ ParGCAllocBuffer::retire(end_of_gc, retain);
+ _retired = true;
+ }
+};
+
+class G1ParGCAllocator : public CHeapObj<mtGC> {
+ friend class G1ParScanThreadState;
+protected:
+ G1CollectedHeap* _g1h;
+
+ size_t _alloc_buffer_waste;
+ size_t _undo_waste;
+
+ void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+ void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
+
+ HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+
+ virtual void retire_alloc_buffers() = 0;
+ virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+
+public:
+ G1ParGCAllocator(G1CollectedHeap* g1h) :
+ _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+ }
+
+ static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+
+ size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
+ size_t undo_waste() { return _undo_waste; }
+
+ HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+ HeapWord* obj = NULL;
+ if (purpose == GCAllocForSurvived) {
+ obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+ } else {
+ obj = alloc_buffer(purpose, context)->allocate(word_sz);
+ }
+ if (obj != NULL) {
+ return obj;
+ }
+ return allocate_slow(purpose, word_sz, context);
+ }
+
+ void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+ if (alloc_buffer(purpose, context)->contains(obj)) {
+ assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+ "should contain whole object");
+ alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+ } else {
+ CollectedHeap::fill_with_object(obj, word_sz);
+ add_to_undo_waste(word_sz);
+ }
+ }
+};
+
+class G1DefaultParGCAllocator : public G1ParGCAllocator {
+ G1ParGCAllocBuffer _surviving_alloc_buffer;
+ G1ParGCAllocBuffer _tenured_alloc_buffer;
+ G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+
+public:
+ G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+
+ virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
+ return _alloc_buffers[purpose];
+ }
+
+ virtual void retire_alloc_buffers();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
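
A minimal sketch of how evacuation code is expected to drive this interface;
the names _par_allocator and lost_forwarding_race are illustrative, not part
of this changeset:

    AllocationContext_t ctx = AllocationContext::current();
    HeapWord* obj = _par_allocator->allocate(GCAllocForSurvived, word_sz, ctx);
    if (obj != NULL && lost_forwarding_race) {
      // Another thread installed the forwardee first; give the space back.
      _par_allocator->undo_allocation(GCAllocForSurvived, obj, word_sz, ctx);
    }
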
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
+ return new G1DefaultAllocator(g1h);
+}
+
+G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
+ return new G1DefaultParGCAllocator(g1h);
+}
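
These two factories are the extension hook that the altsrc and mapfile_ext
build changes above exist to serve: an alternate source tree can supply its
own g1Allocator_ext.cpp that returns subclasses, with no change to the shared
code. A hypothetical override (the class name is illustrative only):

    G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
      return new MyContextAwareAllocator(g1h);
    }
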
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -469,7 +469,7 @@
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
HeapRegion* hr = heap_region_containing(p);
- return !hr->isHumongous();
+ return !hr->is_humongous();
}
void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -560,7 +560,7 @@
}
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
- assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
+ assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
@@ -615,9 +615,10 @@
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
- size_t word_size) {
+ size_t word_size,
+ AllocationContext_t context) {
assert(first != G1_NO_HRM_INDEX, "pre-condition");
- assert(isHumongous(word_size), "word_size should be humongous");
+ assert(is_humongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
// Index of last region in the series + 1.
@@ -666,14 +667,15 @@
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
- first_hr->set_startsHumongous(new_top, new_end);
-
+ first_hr->set_starts_humongous(new_top, new_end);
+ first_hr->set_allocation_context(context);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (uint i = first + 1; i < last; ++i) {
hr = region_at(i);
- hr->set_continuesHumongous(first_hr);
+ hr->set_continues_humongous(first_hr);
+ hr->set_allocation_context(context);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
@@ -711,7 +713,7 @@
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
- // !continuesHumongous(), but it is easier to just update the top
+ // !is_continues_humongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
@@ -740,7 +742,7 @@
check_bitmaps("Humongous Region Allocation", first_hr);
assert(first_hr->used() == word_size * HeapWordSize, "invariant");
- _summary_bytes_used += first_hr->used();
+ _allocator->increase_used(first_hr->used());
_humongous_set.add(first_hr);
return new_obj;
@@ -749,7 +751,7 @@
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
verify_region_sets_optional();
@@ -818,7 +820,8 @@
HeapWord* result = NULL;
if (first != G1_NO_HRM_INDEX) {
- result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
+ result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+ word_size, context);
assert(result != NULL, "it should always return a valid result");
// A successful humongous object allocation changes the used space
@@ -834,7 +837,7 @@
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
assert_heap_not_locked_and_not_at_safepoint();
- assert(!isHumongous(word_size), "we do not allow humongous TLABs");
+ assert(!is_humongous(word_size), "we do not allow humongous TLABs");
unsigned int dummy_gc_count_before;
int dummy_gclocker_retry_count = 0;
@@ -851,7 +854,7 @@
unsigned int gc_count_before;
HeapWord* result = NULL;
- if (!isHumongous(word_size)) {
+ if (!is_humongous(word_size)) {
result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
} else {
result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
@@ -862,6 +865,8 @@
// Create the garbage collection operation...
VM_G1CollectForAllocation op(gc_count_before, word_size);
+ op.set_allocation_context(AllocationContext::current());
+
// ...and get the VM thread to execute it.
VMThread::execute(&op);
@@ -870,7 +875,7 @@
// if it is NULL. If the allocation attempt failed immediately
// after a Full GC, it's unlikely we'll be able to allocate now.
HeapWord* result = op.result();
- if (result != NULL && !isHumongous(word_size)) {
+ if (result != NULL && !is_humongous(word_size)) {
// Allocations that take place on VM operations do not do any
// card dirtying and we have to do it here. We only have to do
// this for non-humongous allocations, though.
@@ -897,12 +902,13 @@
}
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
- unsigned int *gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ AllocationContext_t context,
+ unsigned int *gc_count_before_ret,
+ int* gclocker_retry_count_ret) {
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
- assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
+ assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
"be called for humongous allocation requests");
// We should only get here after the first-level allocation attempt
@@ -919,23 +925,22 @@
{
MutexLockerEx x(Heap_lock);
-
- result = _mutator_alloc_region.attempt_allocation_locked(word_size,
- false /* bot_updates */);
+ result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+ false /* bot_updates */);
if (result != NULL) {
return result;
}
// If we reach here, attempt_allocation_locked() above failed to
// allocate a new region. So the mutator alloc region should be NULL.
- assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+ assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
if (GC_locker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
- result = _mutator_alloc_region.attempt_allocation_force(word_size,
- false /* bot_updates */);
+ result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+ false /* bot_updates */);
if (result != NULL) {
return result;
}
@@ -995,8 +1000,8 @@
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
- result = _mutator_alloc_region.attempt_allocation(word_size,
- false /* bot_updates */);
+ result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+ false /* bot_updates */);
if (result != NULL) {
return result;
}
@@ -1014,8 +1019,8 @@
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
- unsigned int * gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ unsigned int * gc_count_before_ret,
+ int* gclocker_retry_count_ret) {
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
@@ -1028,7 +1033,7 @@
// much as possible.
assert_heap_not_locked_and_not_at_safepoint();
- assert(isHumongous(word_size), "attempt_allocation_humongous() "
+ assert(is_humongous(word_size), "attempt_allocation_humongous() "
"should only be called for humongous allocations");
// Humongous objects can exhaust the heap quickly, so we should check if we
@@ -1056,7 +1061,7 @@
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
- result = humongous_obj_allocate(word_size);
+ result = humongous_obj_allocate(word_size, AllocationContext::current());
if (result != NULL) {
return result;
}
@@ -1132,17 +1137,18 @@
}
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
- bool expect_null_mutator_alloc_region) {
+ AllocationContext_t context,
+ bool expect_null_mutator_alloc_region) {
assert_at_safepoint(true /* should_be_vm_thread */);
- assert(_mutator_alloc_region.get() == NULL ||
+ assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
!expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
- if (!isHumongous(word_size)) {
- return _mutator_alloc_region.attempt_allocation_locked(word_size,
+ if (!is_humongous(word_size)) {
+ return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
} else {
- HeapWord* result = humongous_obj_allocate(word_size);
+ HeapWord* result = humongous_obj_allocate(word_size, context);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
g1_policy()->set_initiate_conc_mark_if_possible();
}
@@ -1162,7 +1168,7 @@
bool doHeapRegion(HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
- if (r->continuesHumongous()) {
+ if (r->is_continues_humongous()) {
// We'll assert that the strong code root list and RSet are empty
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
assert(hrrs->occupied() == 0, "RSet should be empty");
@@ -1199,7 +1205,7 @@
{ }
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
_cl.set_from(r);
r->oop_iterate(&_cl);
}
@@ -1231,14 +1237,14 @@
assert(!hr->is_young(), "not expecting to find young regions");
if (hr->is_free()) {
// We only generate output for non-empty regions.
- } else if (hr->startsHumongous()) {
+ } else if (hr->is_starts_humongous()) {
if (hr->region_num() == 1) {
// single humongous region
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
} else {
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
}
- } else if (hr->continuesHumongous()) {
+ } else if (hr->is_continues_humongous()) {
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
} else if (hr->is_old()) {
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
@@ -1342,8 +1348,8 @@
concurrent_mark()->abort();
// Make sure we'll choose a new allocation region afterwards.
- release_mutator_alloc_region();
- abandon_gc_alloc_regions();
+ _allocator->release_mutator_alloc_region();
+ _allocator->abandon_gc_alloc_regions();
g1_rem_set()->cleanupHRRS();
// We should call this after we retire any currently active alloc
@@ -1515,7 +1521,7 @@
clear_cset_fast_test();
- init_mutator_alloc_region();
+ _allocator->init_mutator_alloc_region();
double end = os::elapsedTime();
g1_policy()->record_full_collection_end();
@@ -1651,6 +1657,7 @@
HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+ AllocationContext_t context,
bool* succeeded) {
assert_at_safepoint(true /* should_be_vm_thread */);
@@ -1658,7 +1665,8 @@
// Let's attempt the allocation first.
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
- false /* expect_null_mutator_alloc_region */);
+ context,
+ false /* expect_null_mutator_alloc_region */);
if (result != NULL) {
assert(*succeeded, "sanity");
return result;
@@ -1668,7 +1676,7 @@
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
- result = expand_and_allocate(word_size);
+ result = expand_and_allocate(word_size, context);
if (result != NULL) {
assert(*succeeded, "sanity");
return result;
@@ -1685,7 +1693,8 @@
// Retry the allocation
result = attempt_allocation_at_safepoint(word_size,
- true /* expect_null_mutator_alloc_region */);
+ context,
+ true /* expect_null_mutator_alloc_region */);
if (result != NULL) {
assert(*succeeded, "sanity");
return result;
@@ -1702,7 +1711,8 @@
// Retry the allocation once more
result = attempt_allocation_at_safepoint(word_size,
- true /* expect_null_mutator_alloc_region */);
+ context,
+ true /* expect_null_mutator_alloc_region */);
if (result != NULL) {
assert(*succeeded, "sanity");
return result;
@@ -1724,7 +1734,7 @@
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
assert_at_safepoint(true /* should_be_vm_thread */);
verify_region_sets_optional();
@@ -1739,7 +1749,8 @@
_hrm.verify_optional();
verify_region_sets_optional();
return attempt_allocation_at_safepoint(word_size,
- false /* expect_null_mutator_alloc_region */);
+ context,
+ false /* expect_null_mutator_alloc_region */);
}
return NULL;
}
@@ -1816,7 +1827,7 @@
// We should only reach here at the end of a Full GC which means we
// should not be holding on to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
- abandon_gc_alloc_regions();
+ _allocator->abandon_gc_alloc_regions();
// Instead of tearing down / rebuilding the free lists here, we
// could instead use the remove_all_pending() method on free_list to
@@ -1849,7 +1860,7 @@
_bot_shared(NULL),
_evac_failure_scan_stack(NULL),
_mark_in_progress(false),
- _cg1r(NULL), _summary_bytes_used(0),
+ _cg1r(NULL),
_g1mm(NULL),
_refine_cte_cl(NULL),
_full_collection(false),
@@ -1861,7 +1872,6 @@
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
- _retained_old_gc_alloc_region(NULL),
_survivor_plab_stats(YoungPLABSize, PLABWeight),
_old_plab_stats(OldPLABSize, PLABWeight),
_expand_heap_after_alloc_failure(true),
@@ -1884,6 +1894,7 @@
vm_exit_during_initialization("Failed necessary allocation.");
}
+ _allocator = G1Allocator::create_allocator(_g1h);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1960,15 +1971,10 @@
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
heap_alignment);
- // It is important to do this in a way such that concurrent readers can't
- // temporarily think something is in the heap. (I've actually seen this
- // happen in asserts: DLD.)
- _reserved.set_word_size(0);
- _reserved.set_start((HeapWord*)heap_rs.base());
- _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
+ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
// Create the gen rem set (and barrier set) for the entire reserved region.
- _rem_set = collector_policy()->create_rem_set(_reserved, 2);
+ _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
set_barrier_set(rem_set()->bs());
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2052,7 +2058,7 @@
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
- _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
+ _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
_g1h = this;
@@ -2127,7 +2133,7 @@
dummy_region->set_top(dummy_region->end());
G1AllocRegion::setup(this, dummy_region);
- init_mutator_alloc_region();
+ _allocator->init_mutator_alloc_region();
// Do create of the monitoring and management support so that
// values in the heap have been properly initialized.
@@ -2237,14 +2243,14 @@
}
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
- assert(!hr->continuesHumongous(), "pre-condition");
+ assert(!hr->is_continues_humongous(), "pre-condition");
hr->reset_gc_time_stamp();
- if (hr->startsHumongous()) {
+ if (hr->is_starts_humongous()) {
uint first_index = hr->hrm_index() + 1;
uint last_index = hr->last_hc_index();
for (uint i = first_index; i < last_index; i += 1) {
HeapRegion* chr = region_at(i);
- assert(chr->continuesHumongous(), "sanity");
+ assert(chr->is_continues_humongous(), "sanity");
chr->reset_gc_time_stamp();
}
}
@@ -2301,21 +2307,12 @@
// Computes the sum of the storage used by the various regions.
-
size_t G1CollectedHeap::used() const {
- assert(Heap_lock->owner() != NULL,
- "Should be owned on this thread's behalf.");
- size_t result = _summary_bytes_used;
- // Read only once in case it is set to NULL concurrently
- HeapRegion* hr = _mutator_alloc_region.get();
- if (hr != NULL)
- result += hr->used();
- return result;
+ return _allocator->used();
}
size_t G1CollectedHeap::used_unlocked() const {
- size_t result = _summary_bytes_used;
- return result;
+ return _allocator->used_unlocked();
}
class SumUsedClosure: public HeapRegionClosure {
@@ -2323,7 +2320,7 @@
public:
SumUsedClosure() : _used(0) {}
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
_used += r->used();
}
return false;
@@ -2355,11 +2352,12 @@
// Let's fill up most of the region
size_t word_size = HeapRegion::GrainWords - 1024;
// And as a result the region we'll allocate will be humongous.
- guarantee(isHumongous(word_size), "sanity");
+ guarantee(is_humongous(word_size), "sanity");
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
// Let's use the existing mechanism for the allocation
- HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+ HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+ AllocationContext::system());
if (dummy_obj != NULL) {
MemRegion mr(dummy_obj, word_size);
CollectedHeap::fill_with_object(mr);
@@ -2510,6 +2508,7 @@
true, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
+ op.set_allocation_context(AllocationContext::current());
VMThread::execute(&op);
if (!op.pause_succeeded()) {
@@ -2581,7 +2580,7 @@
public:
IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
r->oop_iterate(_cl);
}
return false;
@@ -2600,7 +2599,7 @@
public:
IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
- if (! r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
r->object_iterate(_cl);
}
return false;
@@ -2682,11 +2681,11 @@
r->claim_value(), _claim_value);
++_failures;
}
- if (!r->isHumongous()) {
+ if (!r->is_humongous()) {
_sh_region = NULL;
- } else if (r->startsHumongous()) {
+ } else if (r->is_starts_humongous()) {
_sh_region = r;
- } else if (r->continuesHumongous()) {
+ } else if (r->is_continues_humongous()) {
if (r->humongous_start_region() != _sh_region) {
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
"HS = "PTR_FORMAT", should be "PTR_FORMAT,
@@ -2720,7 +2719,7 @@
bool doHeapRegion(HeapRegion* hr) {
assert(hr->in_collection_set(), "how?");
- assert(!hr->isHumongous(), "H-region in CSet");
+ assert(!hr->is_humongous(), "H-region in CSet");
if (hr->claim_value() != _claim_value) {
gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
"claim value = %d, should be %d",
@@ -2859,7 +2858,7 @@
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
HeapRegion* result = _hrm.next_region_in_heap(from);
- while (result != NULL && result->isHumongous()) {
+ while (result != NULL && result->is_humongous()) {
result = _hrm.next_region_in_heap(result);
}
return result;
@@ -2910,7 +2909,7 @@
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
- HeapRegion* hr = _mutator_alloc_region.get();
+ HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
size_t max_tlab = max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
@@ -3219,7 +3218,7 @@
}
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
bool failures = false;
r->verify(_vo, &failures);
if (failures) {
@@ -3597,7 +3596,7 @@
}
}
-void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+void G1CollectedHeap::gc_epilogue(bool full) {
if (G1SummarizeRSetStats &&
(G1SummarizeRSetStatsPeriod > 0) &&
@@ -3614,6 +3613,7 @@
// always_do_update_barrier = true;
resize_all_tlabs();
+ allocation_context_stats().update(full);
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
@@ -3631,6 +3631,8 @@
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
gc_cause);
+
+ op.set_allocation_context(AllocationContext::current());
VMThread::execute(&op);
HeapWord* result = op.result();
@@ -3676,7 +3678,7 @@
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
HeapRegion* region = region_at(index);
- assert(region->startsHumongous(), "Must start a humongous object");
+ assert(region->is_starts_humongous(), "Must start a humongous object");
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}
@@ -3689,7 +3691,7 @@
}
virtual bool doHeapRegion(HeapRegion* r) {
- if (!r->startsHumongous()) {
+ if (!r->is_starts_humongous()) {
return false;
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -3961,7 +3963,7 @@
// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
- release_mutator_alloc_region();
+ _allocator->release_mutator_alloc_region();
// We should call this after we retire the mutator alloc
// region(s) so that all the ALLOC / RETIRE events are generated
@@ -4044,7 +4046,7 @@
setup_surviving_young_words();
// Initialize the GC alloc regions.
- init_gc_alloc_regions(evacuation_info);
+ _allocator->init_gc_alloc_regions(evacuation_info);
// Actually do the work...
evacuate_collection_set(evacuation_info);
@@ -4093,7 +4095,7 @@
_young_list->reset_auxilary_lists();
if (evacuation_failed()) {
- _summary_bytes_used = recalculate_used();
+ _allocator->set_used(recalculate_used());
uint n_queues = MAX2((int)ParallelGCThreads, 1);
for (uint i = 0; i < n_queues; i++) {
if (_evacuation_failed_info_array[i].has_failed()) {
@@ -4103,7 +4105,7 @@
} else {
// The "used" of the the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated.
- _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+ _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
}
if (g1_policy()->during_initial_mark_pause()) {
@@ -4125,7 +4127,7 @@
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
- init_mutator_alloc_region();
+ _allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
@@ -4270,80 +4272,6 @@
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
-void G1CollectedHeap::init_mutator_alloc_region() {
- assert(_mutator_alloc_region.get() == NULL, "pre-condition");
- _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
- _mutator_alloc_region.release();
- assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
- HeapRegion* retained_region = _retained_old_gc_alloc_region;
- _retained_old_gc_alloc_region = NULL;
-
- // We will discard the current GC alloc region if:
- // a) it's in the collection set (it can happen!),
- // b) it's already full (no point in using it),
- // c) it's empty (this means that it was emptied during
- // a cleanup and it should be on the free list now), or
- // d) it's humongous (this means that it was emptied
- // during a cleanup and was added to the free list, but
- // has been subsequently used to allocate a humongous
- // object that may be less than the region size).
- if (retained_region != NULL &&
- !retained_region->in_collection_set() &&
- !(retained_region->top() == retained_region->end()) &&
- !retained_region->is_empty() &&
- !retained_region->isHumongous()) {
- retained_region->record_top_and_timestamp();
- // The retained region was added to the old region set when it was
- // retired. We have to remove it now, since we don't allow regions
- // we allocate to in the region sets. We'll re-add it later, when
- // it's retired again.
- _old_set.remove(retained_region);
- bool during_im = g1_policy()->during_initial_mark_pause();
- retained_region->note_start_of_copying(during_im);
- _old_gc_alloc_region.set(retained_region);
- _hr_printer.reuse(retained_region);
- evacuation_info.set_alloc_regions_used_before(retained_region->used());
- }
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
- assert_at_safepoint(true /* should_be_vm_thread */);
-
- _survivor_gc_alloc_region.init();
- _old_gc_alloc_region.init();
-
- use_retained_old_gc_alloc_region(evacuation_info);
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
- evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
- _old_gc_alloc_region.count());
- _survivor_gc_alloc_region.release();
- // If we have an old GC alloc region to release, we'll save it in
- // _retained_old_gc_alloc_region. If we don't
- // _retained_old_gc_alloc_region will become NULL. This is what we
- // want either way so no reason to check explicitly for either
- // condition.
- _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
- if (ResizePLAB) {
- _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
- _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
- }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
- assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
- assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
- _retained_old_gc_alloc_region = NULL;
-}
-
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
@@ -4484,25 +4412,26 @@
}
HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size) {
+ size_t word_size,
+ AllocationContext_t context) {
if (purpose == GCAllocForSurvived) {
- HeapWord* result = survivor_attempt_allocation(word_size);
+ HeapWord* result = survivor_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the old gen in case we can fit the
// object there.
- return old_attempt_allocation(word_size);
+ return old_attempt_allocation(word_size, context);
}
} else {
assert(purpose == GCAllocForTenured, "sanity");
- HeapWord* result = old_attempt_allocation(word_size);
+ HeapWord* result = old_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the survivors in case we can fit the
// object there.
- return survivor_attempt_allocation(word_size);
+ return survivor_attempt_allocation(word_size, context);
}
}
@@ -4511,9 +4440,6 @@
return NULL;
}
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
- ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-
void G1ParCopyHelper::mark_object(oop obj) {
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
@@ -6011,7 +5937,7 @@
}
}
- release_gc_alloc_regions(n_workers, evacuation_info);
+ _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
// Reset and re-enable the hot card cache.
@@ -6078,7 +6004,7 @@
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par) {
- assert(hr->startsHumongous(), "this is only for starts humongous regions");
+ assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
assert(free_list != NULL, "pre-condition");
size_t hr_capacity = hr->capacity();
@@ -6091,7 +6017,7 @@
uint i = hr->hrm_index() + 1;
while (i < last_index) {
HeapRegion* curr_hr = region_at(i);
- assert(curr_hr->continuesHumongous(), "invariant");
+ assert(curr_hr->is_continues_humongous(), "invariant");
curr_hr->clear_humongous();
free_region(curr_hr, free_list, par);
i += 1;
@@ -6117,10 +6043,7 @@
}
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
- assert(_summary_bytes_used >= bytes,
- err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
- _summary_bytes_used, bytes));
- _summary_bytes_used -= bytes;
+ _allocator->decrease_used(bytes);
}
class G1ParCleanupCTTask : public AbstractGangTask {
@@ -6262,7 +6185,7 @@
bool failures() { return _failures; }
virtual bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) return false;
+ if (hr->is_continues_humongous()) return false;
bool result = _g1h->verify_bitmaps(_caller, hr);
if (!result) {
@@ -6441,7 +6364,7 @@
}
virtual bool doHeapRegion(HeapRegion* r) {
- if (!r->startsHumongous()) {
+ if (!r->is_starts_humongous()) {
return false;
}
@@ -6487,7 +6410,7 @@
if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
- r->isHumongous(),
+ r->is_humongous(),
region_idx,
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
@@ -6506,7 +6429,7 @@
if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
- r->isHumongous(),
+ r->is_humongous(),
r->bottom(),
region_idx,
r->region_num(),
@@ -6696,7 +6619,7 @@
// We ignore young regions, we'll empty the young list afterwards.
// We ignore humongous regions, we're not tearing down the
// humongous regions set.
- assert(r->is_free() || r->is_young() || r->isHumongous(),
+ assert(r->is_free() || r->is_young() || r->is_humongous(),
"it cannot be another type");
}
return false;
@@ -6741,18 +6664,19 @@
}
bool doHeapRegion(HeapRegion* r) {
- if (r->continuesHumongous()) {
+ if (r->is_continues_humongous()) {
return false;
}
if (r->is_empty()) {
// Add free regions to the free list
r->set_free();
+ r->set_allocation_context(AllocationContext::system());
_hrm->insert_into_free_list(r);
} else if (!_free_list_only) {
assert(!r->is_young(), "we should not come across young regions");
- if (r->isHumongous()) {
+ if (r->is_humongous()) {
// We ignore humongous regions, we left the humongous set unchanged
} else {
// Objects that were compacted would have ended up on regions
@@ -6784,12 +6708,12 @@
heap_region_iterate(&cl);
if (!free_list_only) {
- _summary_bytes_used = cl.total_used();
- }
- assert(_summary_bytes_used == recalculate_used(),
- err_msg("inconsistent _summary_bytes_used, "
+ _allocator->set_used(cl.total_used());
+ }
+ assert(_allocator->used_unlocked() == recalculate_used(),
+ err_msg("inconsistent _allocator->used_unlocked(), "
"value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
- _summary_bytes_used, recalculate_used()));
+ _allocator->used_unlocked(), recalculate_used()));
}
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -6829,7 +6753,7 @@
assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
- _summary_bytes_used += allocated_bytes;
+ _allocator->increase_used(allocated_bytes);
_hr_printer.retire(alloc_region);
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
@@ -6837,11 +6761,6 @@
g1mm()->update_eden_size();
}
-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
- bool force) {
- return _g1h->new_mutator_alloc_region(word_size, force);
-}
-
void G1CollectedHeap::set_par_threads() {
// Don't change the number of workers. Use the value previously set
// in the workgroup.
@@ -6858,11 +6777,6 @@
set_par_threads(n_workers);
}
-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
- size_t allocated_bytes) {
- _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
-
// Methods for the GC alloc regions
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@@ -6913,58 +6827,6 @@
_hr_printer.retire(alloc_region);
}
-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
- bool force) {
- assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
- size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
- bool force) {
- assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
- size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForTenured);
-}
-
-HeapRegion* OldGCAllocRegion::release() {
- HeapRegion* cur = get();
- if (cur != NULL) {
- // Determine how far we are from the next card boundary. If it is smaller than
- // the minimum object size we can allocate into, expand into the next card.
- HeapWord* top = cur->top();
- HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
-
- size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
-
- if (to_allocate_words != 0) {
- // We are not at a card boundary. Fill up, possibly into the next, taking the
- // end of the region and the minimum object size into account.
- to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
- MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
-
- // Skip allocation if there is not enough space to allocate even the smallest
- // possible object. In this case this region will not be retained, so the
- // original problem cannot occur.
- if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
- HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
- CollectedHeap::fill_with_object(dummy, to_allocate_words);
- }
- }
- }
- return G1AllocRegion::release();
-}
-
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -6985,13 +6847,13 @@
_old_count(), _humongous_count(), _free_count(){ }
bool doHeapRegion(HeapRegion* hr) {
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
return false;
}
if (hr->is_young()) {
// TODO
- } else if (hr->startsHumongous()) {
+ } else if (hr->is_starts_humongous()) {
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
@@ -7072,7 +6934,7 @@
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
- assert(!hr->continuesHumongous(),
+ assert(!hr->is_continues_humongous(),
err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
@@ -7099,7 +6961,7 @@
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
- assert(!hr->continuesHumongous(),
+ assert(!hr->is_continues_humongous(),
err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
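Taken together, the g1CollectedHeap.cpp hunks above all make the same move: the heap stops touching _mutator_alloc_region and friends directly and instead routes every allocation through _allocator, passing an explicit AllocationContext_t. A minimal standalone C++ sketch of that dispatch shape (every name below is an illustrative stand-in, not the actual HotSpot G1Allocator API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uint8_t AllocationContext_t;        // opaque per-request tag (assumed)

struct AllocRegion {                        // stand-in for MutatorAllocRegion
  char* top;
  char* end;
  void* attempt_allocation(size_t bytes) {
    if (top + bytes > end) return nullptr;  // region exhausted
    void* result = top;
    top += bytes;                           // bump-pointer allocation
    return result;
  }
};

class Allocator {                           // stand-in for G1Allocator
  AllocRegion _mutator_region;
public:
  Allocator(char* start, char* end) : _mutator_region{start, end} {}
  AllocRegion* mutator_alloc_region(AllocationContext_t /*context*/) {
    // The default allocator keeps a single region and ignores the
    // context; a context-aware subclass could map context -> region.
    return &_mutator_region;
  }
};

class Heap {                                // stand-in for G1CollectedHeap
  Allocator* _allocator;
public:
  explicit Heap(Allocator* a) : _allocator(a) {}
  void* attempt_allocation(size_t bytes, AllocationContext_t context) {
    return _allocator->mutator_alloc_region(context)->attempt_allocation(bytes);
  }
};

int main() {
  static char space[1024];
  Allocator allocator(space, space + sizeof(space));
  Heap heap(&allocator);
  printf("allocated at %p\n", heap.attempt_allocation(64, 0));
}

The payoff is visible in the hunks: a context-aware allocator only has to override the region lookup, while call sites such as attempt_allocation_slow() stay identical for every context.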
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
@@ -80,12 +82,6 @@
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
-enum GCAllocPurpose {
- GCAllocForTenured,
- GCAllocForSurvived,
- GCAllocPurposeCount
-};
-
class YoungList : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;
@@ -158,40 +154,6 @@
void print();
};
-class MutatorAllocRegion : public G1AllocRegion {
-protected:
- virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
- virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
- MutatorAllocRegion()
- : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
- virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
- virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
- SurvivorGCAllocRegion()
- : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
- virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
- virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
- OldGCAllocRegion()
- : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-
- // This specialization of release() makes sure that the last card that has been
- // allocated into has been completely filled by a dummy object.
- // This avoids races when remembered set scanning wants to update the BOT of the
- // last card in the retained old gc alloc region, and allocation threads
- // allocating into that card at the same time.
- virtual HeapRegion* release();
-};
-
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
@@ -222,6 +184,9 @@
friend class MutatorAllocRegion;
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
+ friend class G1Allocator;
+ friend class G1DefaultAllocator;
+ friend class G1ResManAllocator;
// Closures used in implementation.
template <G1Barrier barrier, G1Mark do_mark_object>
@@ -232,6 +197,8 @@
friend class G1ParScanClosureSuper;
friend class G1ParEvacuateFollowersClosure;
friend class G1ParTask;
+ friend class G1ParGCAllocator;
+ friend class G1DefaultParGCAllocator;
friend class G1FreeGarbageRegionClosure;
friend class RefineCardTableEntryClosure;
friend class G1PrepareCompactClosure;
@@ -293,44 +260,18 @@
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
- // Alloc region used to satisfy mutator allocation requests.
- MutatorAllocRegion _mutator_alloc_region;
+ // Class that handles the different kinds of allocations.
+ G1Allocator* _allocator;
- // Alloc region used to satisfy allocation requests by the GC for
- // survivor objects.
- SurvivorGCAllocRegion _survivor_gc_alloc_region;
+ // Statistics for each allocation context
+ AllocationContextStats _allocation_context_stats;
// PLAB sizing policy for survivors.
PLABStats _survivor_plab_stats;
- // Alloc region used to satisfy allocation requests by the GC for
- // old objects.
- OldGCAllocRegion _old_gc_alloc_region;
-
// PLAB sizing policy for tenured objects.
PLABStats _old_plab_stats;
- PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
- PLABStats* stats = NULL;
-
- switch (purpose) {
- case GCAllocForSurvived:
- stats = &_survivor_plab_stats;
- break;
- case GCAllocForTenured:
- stats = &_old_plab_stats;
- break;
- default:
- assert(false, "unrecognized GCAllocPurpose");
- }
-
- return stats;
- }
-
- // The last old region we allocated to during the last GC.
- // Typically, it is not full so we should re-use it during the next GC.
- HeapRegion* _retained_old_gc_alloc_region;
-
// It specifies whether we should attempt to expand the heap after a
// region allocation failure. If heap expansion fails we set this to
// false so that we don't re-attempt the heap expansion (it's likely
@@ -348,9 +289,6 @@
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
- // Set up the retained old gc alloc region as the current old gc alloc region.
- void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
@@ -361,13 +299,6 @@
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
- // Determines PLAB size for a particular allocation purpose.
- size_t desired_plab_sz(GCAllocPurpose purpose);
-
- // Outside of GC pauses, the number of bytes used in all regions other
- // than the current allocation region.
- size_t _summary_bytes_used;
-
// Records whether the region at the given index is kept live by roots or
// references from the young generation.
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
@@ -526,11 +457,12 @@
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
- size_t word_size);
+ size_t word_size,
+ AllocationContext_t context);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
- HeapWord* humongous_obj_allocate(size_t word_size);
+ HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
// The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime
@@ -586,6 +518,7 @@
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
+ AllocationContext_t context,
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret);
@@ -600,7 +533,8 @@
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
- bool expect_null_mutator_alloc_region);
+ AllocationContext_t context,
+ bool expect_null_mutator_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
@@ -612,7 +546,9 @@
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
- HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+ HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+ size_t word_size,
+ AllocationContext_t context);
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
HeapRegion* alloc_region,
@@ -624,10 +560,12 @@
void par_allocate_remaining_space(HeapRegion* r);
// Allocation attempt during GC for a survivor object / PLAB.
- inline HeapWord* survivor_attempt_allocation(size_t word_size);
+ inline HeapWord* survivor_attempt_allocation(size_t word_size,
+ AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
- inline HeapWord* old_attempt_allocation(size_t word_size);
+ inline HeapWord* old_attempt_allocation(size_t word_size,
+ AllocationContext_t context);
// These methods are the "callbacks" from the G1AllocRegion class.
@@ -666,13 +604,15 @@
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
- HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+ HeapWord* satisfy_failed_allocation(size_t word_size,
+ AllocationContext_t context,
+ bool* succeeded);
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
- HeapWord* expand_and_allocate(size_t word_size);
+ HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
// Process any reference objects discovered during
// an incremental evacuation pause.
@@ -684,6 +624,10 @@
public:
+ G1Allocator* allocator() {
+ return _allocator;
+ }
+
G1MonitoringSupport* g1mm() {
assert(_g1mm != NULL, "should have been initialized");
return _g1mm;
@@ -695,6 +639,29 @@
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
+ // Returns the PLAB statistics given a purpose.
+ PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+ PLABStats* stats = NULL;
+
+ switch (purpose) {
+ case GCAllocForSurvived:
+ stats = &_survivor_plab_stats;
+ break;
+ case GCAllocForTenured:
+ stats = &_old_plab_stats;
+ break;
+ default:
+ assert(false, "unrecognized GCAllocPurpose");
+ }
+
+ return stats;
+ }
+
+ // Determines PLAB size for a particular allocation purpose.
+ size_t desired_plab_sz(GCAllocPurpose purpose);
+
+ inline AllocationContextStats& allocation_context_stats();
+
// Do anything common to GC's.
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
@@ -1272,7 +1239,7 @@
// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(HeapRegion* hr) {
- return hr == _retained_old_gc_alloc_region;
+ return _allocator->is_retained_old_region(hr);
}
// Perform a collection of the heap; intended for use in implementing
@@ -1283,6 +1250,11 @@
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
+ virtual void copy_allocation_context_stats(const jint* contexts,
+ jlong* totals,
+ jbyte* accuracy,
+ jint len);
+
// True iff an evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; }
@@ -1540,7 +1512,7 @@
virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
// Returns "true" iff the given word_size is "very large".
- static bool isHumongous(size_t word_size) {
+ static bool is_humongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs
// are capped at the humongous threshold and we want to
// ensure that we don't try to allocate a TLAB as
@@ -1747,28 +1719,4 @@
size_t _max_heap_capacity;
};
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
- bool _retired;
-
-public:
- G1ParGCAllocBuffer(size_t gclab_word_size);
- virtual ~G1ParGCAllocBuffer() {
- guarantee(_retired, "Allocation buffer has not been retired");
- }
-
- virtual void set_buf(HeapWord* buf) {
- ParGCAllocBuffer::set_buf(buf);
- _retired = false;
- }
-
- virtual void retire(bool end_of_gc, bool retain) {
- if (_retired) {
- return;
- }
- ParGCAllocBuffer::retire(end_of_gc, retain);
- _retired = true;
- }
-};
-
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
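One header detail worth spelling out: is_humongous() (the renamed isHumongous()) is a strict greater-than test against _humongous_object_threshold_in_words, which the .cpp hunk above sets to HeapRegion::GrainWords / 2, so an allocation of exactly threshold size (such as a maximally sized TLAB) is still a regular allocation. A worked sketch of the arithmetic, assuming for illustration a 1 MB region and 8-byte heap words:

#include <cstddef>
#include <cstdio>

// Assumed example values: 1 MB regions and 8-byte heap words give
// GrainWords = 131072 and a humongous threshold of 65536 words.
const size_t GrainWords = (1024 * 1024) / 8;
const size_t HumongousThresholdWords = GrainWords / 2;

// Strictly greater-than, mirroring the note in the header: TLABs are
// capped at the threshold and must never themselves count as humongous.
bool is_humongous(size_t word_size) {
  return word_size > HumongousThresholdWords;
}

int main() {
  printf("%d\n", (int)is_humongous(HumongousThresholdWords));     // 0
  printf("%d\n", (int)is_humongous(HumongousThresholdWords + 1)); // 1
}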
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -37,14 +37,18 @@
// Inline functions for G1CollectedHeap
+inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
+ return _allocation_context_stats;
+}
+
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
- p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
- return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+ p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
+ return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
@@ -63,7 +67,7 @@
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = heap_region_containing_raw(addr);
- if (hr->continuesHumongous()) {
+ if (hr->is_continues_humongous()) {
return hr->humongous_start_region();
}
return hr;
@@ -95,13 +99,15 @@
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
- assert(!isHumongous(word_size), "attempt_allocation() should not "
+ assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
- HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
- false /* bot_updates */);
+ AllocationContext_t context = AllocationContext::current();
+ HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+ false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
+ context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
@@ -112,17 +118,17 @@
return result;
}
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
- word_size) {
- assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+ AllocationContext_t context) {
+ assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
- HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
- false /* bot_updates */);
+ HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+ false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
- result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
- false /* bot_updates */);
+ result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+ false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
@@ -130,16 +136,17 @@
return result;
}
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
- assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+ AllocationContext_t context) {
+ assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
- HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
- true /* bot_updates */);
+ HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+ true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
- result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
- true /* bot_updates */);
+ result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+ true /* bot_updates */);
}
return result;
}
@@ -159,7 +166,7 @@
assert(word_size > 0, "pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");
- assert(!containing_hr->isHumongous(), "it should not be humongous");
+ assert(!containing_hr->is_humongous(), "it should not be humongous");
HeapWord* end = start + word_size;
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
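The inline helpers above all follow one protocol: try the current alloc region lock-free first, and only on failure retry under a lock (Heap_lock for mutator allocations, FreeList_lock for GC allocations), where the region may also be replaced. A reduced sketch of that two-step shape using a CAS bump pointer -- an assumed simplification, since the real G1AllocRegion protocol has additional states for retirement and forced allocation:

#include <atomic>
#include <mutex>
#include <cstddef>

class BumpRegion {
  std::atomic<char*> _top;
  char* _end;
  std::mutex _lock;                 // stand-in for Heap_lock / FreeList_lock

  void* attempt_allocation(size_t bytes) {
    char* old_top = _top.load(std::memory_order_relaxed);
    do {
      if (old_top + bytes > _end) return nullptr;   // fast path fails
    } while (!_top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;
  }

  void* attempt_allocation_locked(size_t bytes) {
    std::lock_guard<std::mutex> guard(_lock);
    // Under the lock the real code may retire this region and install
    // a fresh one; the sketch simply retries the bump allocation.
    return attempt_allocation(bytes);
  }

public:
  BumpRegion(char* start, char* end) : _top(start), _end(end) {}

  void* allocate(size_t bytes) {
    void* result = attempt_allocation(bytes);       // lock-free first
    if (result == nullptr) {
      result = attempt_allocation_locked(bytes);    // then locked retry
    }
    return result;
  }
};

int main() {
  static char space[256];
  BumpRegion region(space, space + sizeof(space));
  void* a = region.allocate(64);    // succeeds on the fast path
  void* b = region.allocate(512);   // fails on both paths
  return (a != nullptr && b == nullptr) ? 0 : 1;
}

Keeping the unlocked attempt first matters because allocation is the hot path; the lock is only ever touched by threads that found the region full.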
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+ jlong* totals,
+ jbyte* accuracy,
+ jint len) {
+}
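g1CollectedHeap_ext.cpp is the first of two *_ext files in this change: the shared header declares the method, the _ext translation unit supplies a deliberately empty default body, and an alternative build can swap in just that one file without touching shared code. The same pattern, sketched with hypothetical names (Widget and copy_stats are not HotSpot symbols):

// widget.hpp -- the shared header declares the hook.
struct Widget {
  void copy_stats(long* totals, int len);   // body supplied by an _ext file
};

// widget_ext.cpp -- the open default: an empty stub. A specialized
// build replaces only this file to provide real behavior.
void Widget::copy_stats(long* /*totals*/, int /*len*/) {
  // default build: no allocation contexts, nothing to copy
}

int main() {
  Widget w;
  w.copy_stats(nullptr, 0);                 // the default stub is a no-op
}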
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -192,7 +192,7 @@
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
bool during_conc_mark = _g1h->mark_in_progress();
- assert(!hr->isHumongous(), "sanity");
+ assert(!hr->is_humongous(), "sanity");
assert(hr->in_collection_set(), "bad CS");
if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -43,9 +43,7 @@
_hot_cache_idx = 0;
// For refining the cards in the hot cache in parallel
- uint n_workers = (ParallelGCThreads > 0 ?
- _g1h->workers()->total_workers() : 1);
- _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+ _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
_hot_cache_par_claimed_idx = 0;
_card_counts.initialize(card_counts_storage);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -70,6 +70,9 @@
G1CardCounts _card_counts;
+ // The number of cached cards a thread claims when flushing the cache
+ static const int ClaimChunkSize = 32;
+
bool default_use_cache() const {
return (G1ConcRSLogCacheSize > 0);
}
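The hot card cache change replaces per-worker division of the cache (_hot_cache_size / n_workers) with a fixed ClaimChunkSize of 32: each thread repeatedly claims the next 32 cached cards with a single atomic add, which self-balances when workers drain the cache at different speeds. A standalone sketch of that claiming loop, driven single-threaded in main for brevity (kCacheSize and the per-card work are assumptions):

#include <atomic>
#include <cstdio>

const int kCacheSize = 1000;           // assumed cache length
const int kClaimChunkSize = 32;        // mirrors ClaimChunkSize above

int g_cards[kCacheSize];               // the "hot card cache"
std::atomic<int> g_claimed_idx(0);     // next unclaimed index

// Each worker loops on this until it returns false: claim up to 32
// cards with one atomic add, then process the claimed slice privately.
bool claim_and_process_chunk() {
  int start = g_claimed_idx.fetch_add(kClaimChunkSize);
  if (start >= kCacheSize) return false;             // cache drained
  int end = start + kClaimChunkSize;
  if (end > kCacheSize) end = kCacheSize;            // clamp last chunk
  for (int i = start; i < end; i++) {
    g_cards[i] += 1;                                 // stand-in for refinement
  }
  return true;
}

int main() {
  while (claim_and_process_chunk()) { }
  printf("processed %d cards\n", kCacheSize);
}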
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -193,76 +193,6 @@
gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
-class G1PrepareCompactClosure: public HeapRegionClosure {
- G1CollectedHeap* _g1h;
- ModRefBarrierSet* _mrbs;
- CompactPoint _cp;
- HeapRegionSetCount _humongous_regions_removed;
-
- bool is_cp_initialized() const {
- return _cp.space != NULL;
- }
-
- void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
- // If this is the first live region that we came across which we can compact,
- // initialize the CompactPoint.
- if (!is_cp_initialized()) {
- _cp.space = hr;
- _cp.threshold = hr->initialize_threshold();
- }
- hr->prepare_for_compaction(&_cp);
- // Also clear the part of the card table that will be unused after
- // compaction.
- _mrbs->clear(MemRegion(hr->compaction_top(), end));
- }
-
- void free_humongous_region(HeapRegion* hr) {
- HeapWord* end = hr->end();
- FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
- assert(hr->startsHumongous(),
- "Only the start of a humongous region should be freed.");
-
- hr->set_containing_set(NULL);
- _humongous_regions_removed.increment(1u, hr->capacity());
-
- _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
- prepare_for_compaction(hr, end);
- dummy_free_list.remove_all();
- }
-
-public:
- G1PrepareCompactClosure()
- : _g1h(G1CollectedHeap::heap()),
- _mrbs(_g1h->g1_barrier_set()),
- _cp(NULL),
- _humongous_regions_removed() { }
-
- void update_sets() {
- // We'll recalculate total used bytes and recreate the free list
- // at the end of the GC, so no point in updating those values here.
- HeapRegionSetCount empty_set;
- _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
- }
-
- bool doHeapRegion(HeapRegion* hr) {
- if (hr->isHumongous()) {
- if (hr->startsHumongous()) {
- oop obj = oop(hr->bottom());
- if (obj->is_gc_marked()) {
- obj->forward_to(obj);
- } else {
- free_humongous_region(hr);
- }
- } else {
- assert(hr->continuesHumongous(), "Invalid humongous.");
- }
- } else {
- prepare_for_compaction(hr, hr->end());
- }
- return false;
- }
-};
void G1MarkSweep::mark_sweep_phase2() {
// Now all live objects are marked, compute the new object addresses.
@@ -271,21 +201,17 @@
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("2");
- G1PrepareCompactClosure blk;
- g1h->heap_region_iterate(&blk);
- blk.update_sets();
+ prepare_compaction();
}
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
- if (r->isHumongous()) {
- if (r->startsHumongous()) {
+ if (r->is_humongous()) {
+ if (r->is_starts_humongous()) {
// We must adjust the pointers on the single H object.
oop obj = oop(r->bottom());
// point all the oops to the new location
@@ -340,8 +266,8 @@
G1SpaceCompactClosure() {}
bool doHeapRegion(HeapRegion* hr) {
- if (hr->isHumongous()) {
- if (hr->startsHumongous()) {
+ if (hr->is_humongous()) {
+ if (hr->is_starts_humongous()) {
oop obj = oop(hr->bottom());
if (obj->is_gc_marked()) {
obj->init_mark();
@@ -373,3 +299,68 @@
g1h->heap_region_iterate(&blk);
}
+
+void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ g1h->heap_region_iterate(blk);
+ blk->update_sets();
+}
+
+void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
+ HeapWord* end = hr->end();
+ FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+ assert(hr->is_starts_humongous(),
+ "Only the start of a humongous region should be freed.");
+
+ hr->set_containing_set(NULL);
+ _humongous_regions_removed.increment(1u, hr->capacity());
+
+ _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
+ prepare_for_compaction(hr, end);
+ dummy_free_list.remove_all();
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+ // If this is the first live region that we came across which we can compact,
+ // initialize the CompactPoint.
+ if (!is_cp_initialized()) {
+ _cp.space = hr;
+ _cp.threshold = hr->initialize_threshold();
+ }
+ prepare_for_compaction_work(&_cp, hr, end);
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
+ HeapRegion* hr,
+ HeapWord* end) {
+ hr->prepare_for_compaction(cp);
+ // Also clear the part of the card table that will be unused after
+ // compaction.
+ _mrbs->clear(MemRegion(hr->compaction_top(), end));
+}
+
+void G1PrepareCompactClosure::update_sets() {
+ // We'll recalculate total used bytes and recreate the free list
+ // at the end of the GC, so no point in updating those values here.
+ HeapRegionSetCount empty_set;
+ _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+}
+
+bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
+ if (hr->is_humongous()) {
+ if (hr->is_starts_humongous()) {
+ oop obj = oop(hr->bottom());
+ if (obj->is_gc_marked()) {
+ obj->forward_to(obj);
+ } else {
+ free_humongous_region(hr);
+ }
+ } else {
+ assert(hr->is_continues_humongous(), "Invalid humongous.");
+ }
+ } else {
+ prepare_for_compaction(hr, hr->end());
+ }
+ return false;
+}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -43,7 +43,7 @@
// compaction.
//
// Class unloading will only occur when a full gc is invoked.
-
+class G1PrepareCompactClosure;
class G1MarkSweep : AllStatic {
friend class VM_G1MarkSweep;
@@ -70,6 +70,30 @@
static void mark_sweep_phase4();
static void allocate_stacks();
+ static void prepare_compaction();
+ static void prepare_compaction_work(G1PrepareCompactClosure* blk);
+};
+
+class G1PrepareCompactClosure : public HeapRegionClosure {
+ protected:
+ G1CollectedHeap* _g1h;
+ ModRefBarrierSet* _mrbs;
+ CompactPoint _cp;
+ HeapRegionSetCount _humongous_regions_removed;
+
+ virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
+ void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
+ void free_humongous_region(HeapRegion* hr);
+ bool is_cp_initialized() const { return _cp.space != NULL; }
+
+ public:
+ G1PrepareCompactClosure() :
+ _g1h(G1CollectedHeap::heap()),
+ _mrbs(_g1h->g1_barrier_set()),
+ _humongous_regions_removed() { }
+
+ void update_sets();
+ bool doHeapRegion(HeapRegion* hr);
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MarkSweep.hpp"
+
+void G1MarkSweep::prepare_compaction() {
+ G1PrepareCompactClosure blk;
+ G1MarkSweep::prepare_compaction_work(&blk);
+}
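The G1MarkSweep refactoring above is a template-method split: prepare_for_compaction() is now virtual so a specialized closure can wrap the per-region step, the protected prepare_for_compaction_work() keeps the shared card-clearing core, and the _ext file chooses which closure the default build instantiates. A compact illustration of the split (class and method names are stand-ins):

#include <cstdio>

class PrepareCompact {
 protected:
  virtual void prepare_region(int region) {   // overridable outer step
    prepare_region_work(region);              // default: just the core
  }
  void prepare_region_work(int region) {      // shared, non-virtual core
    printf("compact region %d\n", region);
  }
 public:
  virtual ~PrepareCompact() {}
  void do_region(int region) { prepare_region(region); }
};

class LoggingPrepareCompact : public PrepareCompact {
 protected:
  void prepare_region(int region) override {  // wrapper reuses the core
    printf("about to compact %d\n", region);
    prepare_region_work(region);
  }
};

int main() {
  LoggingPrepareCompact cl;
  cl.do_region(7);
}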
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -38,11 +38,8 @@
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
- _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
- _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
_age_table(false), _scanner(g1h, rp),
- _strong_roots_time(0), _term_time(0),
- _alloc_buffer_waste(0), _undo_waste(0) {
+ _strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
// we allocate G1YoungSurvRateNumRegions plus one entries, since
// we "sacrifice" entry 0 to keep track of surviving bytes for
@@ -60,14 +57,14 @@
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
- _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
- _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
+ _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_start = os::elapsedTime();
}
G1ParScanThreadState::~G1ParScanThreadState() {
- retire_alloc_buffers();
+ _g1_par_allocator->retire_alloc_buffers();
+ delete _g1_par_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
@@ -90,14 +87,16 @@
const double elapsed_ms = elapsed_time() * 1000.0;
const double s_roots_ms = strong_roots_time() * 1000.0;
const double term_ms = term_time() * 1000.0;
+ const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
+ const size_t undo_waste = _g1_par_allocator->undo_waste();
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
- (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
- alloc_buffer_waste() * HeapWordSize / K,
- undo_waste() * HeapWordSize / K);
+ (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
+ alloc_buffer_waste * HeapWordSize / K,
+ undo_waste * HeapWordSize / K);
}
#ifdef ASSERT
@@ -164,12 +163,13 @@
: m->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
- HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+ AllocationContext_t context = from_region->allocation_context();
+ HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
if (obj_ptr != NULL) {
- undo_allocation(alloc_purpose, obj_ptr, word_sz);
+ _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj_ptr = NULL;
}
}
@@ -246,66 +246,8 @@
obj->oop_iterate_backwards(&_scanner);
}
} else {
- undo_allocation(alloc_purpose, obj_ptr, word_sz);
+ _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj = forward_ptr;
}
return obj;
}
-
-HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
- HeapWord* obj = NULL;
- size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
- if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
- add_to_alloc_buffer_waste(alloc_buf->words_remaining());
- alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
- HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
- if (buf == NULL) {
- return NULL; // Let caller handle allocation failure.
- }
- // Otherwise.
- alloc_buf->set_word_size(gclab_word_size);
- alloc_buf->set_buf(buf);
-
- obj = alloc_buf->allocate(word_sz);
- assert(obj != NULL, "buffer was definitely big enough...");
- } else {
- obj = _g1h->par_allocate_during_gc(purpose, word_sz);
- }
- return obj;
-}
-
-void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
- if (alloc_buffer(purpose)->contains(obj)) {
- assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
- "should contain whole object");
- alloc_buffer(purpose)->undo_allocation(obj, word_sz);
- } else {
- CollectedHeap::fill_with_object(obj, word_sz);
- add_to_undo_waste(word_sz);
- }
-}
-
-HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
- HeapWord* obj = NULL;
- if (purpose == GCAllocForSurvived) {
- obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
- } else {
- obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
- }
- if (obj != NULL) {
- return obj;
- }
- return allocate_slow(purpose, word_sz);
-}
-
-void G1ParScanThreadState::retire_alloc_buffers() {
- for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
- size_t waste = _alloc_buffers[ap]->words_remaining();
- add_to_alloc_buffer_waste(waste);
- _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
- true /* end_of_gc */,
- false /* retain */);
- }
-}
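
The fast- and slow-path allocation code removed above moves into G1ParGCAllocator (added by this changeset), now keyed by allocation context as well as purpose. A rough sketch of where the fast path ends up, with member names inferred from the call sites above rather than quoted from the new file:

    HeapWord* G1ParGCAllocator::allocate(GCAllocPurpose purpose, size_t word_sz,
                                         AllocationContext_t context) {
      HeapWord* obj = NULL;
      if (purpose == GCAllocForSurvived) {
        // Survivor PLABs keep the extra alignment the removed code applied.
        obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
      } else {
        obj = alloc_buffer(purpose, context)->allocate(word_sz);
      }
      if (obj != NULL) {
        return obj;
      }
      return allocate_slow(purpose, word_sz, context); // refill the PLAB or allocate directly
    }
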
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -46,9 +46,8 @@
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
- G1ParGCAllocBuffer _surviving_alloc_buffer;
- G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+ G1ParGCAllocator* _g1_par_allocator;
+
ageTable _age_table;
G1ParScanClosure _scanner;
@@ -78,7 +77,6 @@
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
@@ -90,13 +88,6 @@
ageTable* age_table() { return &_age_table; }
- G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
- return _alloc_buffers[purpose];
- }
-
- size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
- size_t undo_waste() const { return _undo_waste; }
-
#ifdef ASSERT
bool queue_is_empty() const { return _refs->is_empty(); }
@@ -110,7 +101,7 @@
_refs->push(ref);
}
- template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+ template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
// If the new value of the field points to the same region or
// is the to-space, we don't need to include it in the Rset updates.
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
@@ -121,12 +112,6 @@
}
}
}
- private:
-
- inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
- inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
- inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
-
public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
@@ -172,8 +157,6 @@
}
private:
- void retire_alloc_buffers();
-
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -413,7 +413,7 @@
_ctbs(_g1h->g1_barrier_set()) {}
bool doHeapRegion(HeapRegion* r) {
- if (!r->continuesHumongous()) {
+ if (!r->is_continues_humongous()) {
r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
}
return false;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -119,7 +119,7 @@
// Record, if necessary, the fact that *p (where "p" is in region "from",
// which is required to be non-NULL) has changed to a new non-NULL value.
template <class T> void write_ref(HeapRegion* from, T* p);
- template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
+ template <class T> void par_write_ref(HeapRegion* from, T* p, uint tid);
// Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
// or card, respectively, such that a region or card with a corresponding
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -44,7 +44,7 @@
}
template <class T>
-inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
+inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (obj == NULL) {
return;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -263,7 +263,7 @@
current = &_free;
} else if (r->is_young()) {
current = &_young;
- } else if (r->isHumongous()) {
+ } else if (r->is_humongous()) {
current = &_humonguous;
} else if (r->is_old()) {
current = &_old;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
@@ -138,32 +139,16 @@
}
}
-// Minimum region size; we won't go lower than that.
-// We might want to decrease this in the future, to deal with small
-// heaps a bit more efficiently.
-#define MIN_REGION_SIZE ( 1024 * 1024 )
-
-// Maximum region size; we don't go higher than that. There's a good
-// reason for having an upper bound. We don't want regions to get too
-// large, otherwise cleanup's effectiveness would decrease as there
-// will be fewer opportunities to find totally empty regions after
-// marking.
-#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
-
-// The automatic region size calculation will try to have around this
-// many regions in the heap (based on the min heap size).
-#define TARGET_REGION_NUMBER 2048
-
size_t HeapRegion::max_region_size() {
- return (size_t)MAX_REGION_SIZE;
+ return HeapRegionBounds::max_size();
}
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
- region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
- (uintx) MIN_REGION_SIZE);
+ region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+ (uintx) HeapRegionBounds::min_size());
}
int region_size_log = log2_long((jlong) region_size);
@@ -173,10 +158,10 @@
region_size = ((uintx)1 << region_size_log);
// Now make sure that we don't go over or under our limits.
- if (region_size < MIN_REGION_SIZE) {
- region_size = MIN_REGION_SIZE;
- } else if (region_size > MAX_REGION_SIZE) {
- region_size = MAX_REGION_SIZE;
+ if (region_size < HeapRegionBounds::min_size()) {
+ region_size = HeapRegionBounds::min_size();
+ } else if (region_size > HeapRegionBounds::max_size()) {
+ region_size = HeapRegionBounds::max_size();
}
// And recalculate the log.
@@ -213,11 +198,12 @@
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions");
- assert(_end == _orig_end,
+ assert(_end == orig_end(),
"we should have already filtered out humongous regions");
_in_collection_set = false;
+ set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_free();
@@ -264,9 +250,9 @@
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
-void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
- assert(!isHumongous(), "sanity / pre-condition");
- assert(end() == _orig_end,
+void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
+ assert(!is_humongous(), "sanity / pre-condition");
+ assert(end() == orig_end(),
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
@@ -278,30 +264,30 @@
_offsets.set_for_starts_humongous(new_top);
}
-void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
- assert(!isHumongous(), "sanity / pre-condition");
- assert(end() == _orig_end,
+void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
+ assert(!is_humongous(), "sanity / pre-condition");
+ assert(end() == orig_end(),
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
- assert(first_hr->startsHumongous(), "pre-condition");
+ assert(first_hr->is_starts_humongous(), "pre-condition");
_type.set_continues_humongous();
_humongous_start_region = first_hr;
}
void HeapRegion::clear_humongous() {
- assert(isHumongous(), "pre-condition");
+ assert(is_humongous(), "pre-condition");
- if (startsHumongous()) {
+ if (is_starts_humongous()) {
assert(top() <= end(), "pre-condition");
- set_end(_orig_end);
+ set_end(orig_end());
if (top() > end()) {
// at least one "continues humongous" region after it
set_top(end());
}
} else {
// continues humongous
- assert(end() == _orig_end, "sanity");
+ assert(end() == orig_end(), "sanity");
}
assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
@@ -324,9 +310,10 @@
MemRegion mr) :
G1OffsetTableContigSpace(sharedOffsetArray, mr),
_hrm_index(hrm_index),
+ _allocation_context(AllocationContext::system()),
_humongous_start_region(NULL),
_in_collection_set(false),
- _next_in_special_set(NULL), _orig_end(NULL),
+ _next_in_special_set(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_next_young_region(NULL),
@@ -349,10 +336,14 @@
G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
- _orig_end = mr.end();
hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
record_top_and_timestamp();
+
+ assert(mr.end() == orig_end(),
+ err_msg("Given region end address " PTR_FORMAT " should match exactly "
+ "bottom plus one region size, i.e. " PTR_FORMAT,
+ p2i(mr.end()), p2i(orig_end())));
}
CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -663,7 +654,7 @@
return;
}
- if (continuesHumongous()) {
+ if (is_continues_humongous()) {
if (strong_code_roots_length > 0) {
gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
"region but has "SIZE_FORMAT" code root entries",
@@ -683,6 +674,8 @@
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
+ st->print("AC%4u", allocation_context());
+
st->print(" %2s", get_short_type_str());
if (in_collection_set())
st->print(" CS");
@@ -788,7 +781,7 @@
HeapRegion* to = _g1h->heap_region_containing(obj);
if (from != NULL && to != NULL &&
from != to &&
- !to->isHumongous()) {
+ !to->is_humongous()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
const jbyte dirty = CardTableModRefBS::dirty_card_val();
@@ -842,19 +835,19 @@
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
VerifyLiveClosure vl_cl(g1, vo);
- bool is_humongous = isHumongous();
+ bool is_region_humongous = is_humongous();
size_t object_num = 0;
while (p < top()) {
oop obj = oop(p);
size_t obj_size = block_size(p);
object_num += 1;
- if (is_humongous != g1->isHumongous(obj_size) &&
+ if (is_region_humongous != g1->is_humongous(obj_size) &&
!g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
SIZE_FORMAT" words) in a %shumongous region",
- p, g1->isHumongous(obj_size) ? "" : "non-",
- obj_size, is_humongous ? "" : "non-");
+ p, g1->is_humongous(obj_size) ? "" : "non-",
+ obj_size, is_region_humongous ? "" : "non-");
*failures = true;
return;
}
@@ -963,7 +956,7 @@
}
}
- if (is_humongous && object_num > 1) {
+ if (is_region_humongous && object_num > 1) {
gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
"but has "SIZE_FORMAT", objects",
bottom(), end(), object_num);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
+#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/heapRegionType.hpp"
@@ -222,13 +223,12 @@
// The index of this region in the heap region sequence.
uint _hrm_index;
+ AllocationContext_t _allocation_context;
+
HeapRegionType _type;
// For a humongous region, region in which it starts.
HeapRegion* _humongous_start_region;
- // For the start region of a humongous sequence, it's original end().
- HeapWord* _orig_end;
-
// True iff the region is in current collection_set.
bool _in_collection_set;
@@ -417,9 +417,9 @@
bool is_eden() const { return _type.is_eden(); }
bool is_survivor() const { return _type.is_survivor(); }
- bool isHumongous() const { return _type.is_humongous(); }
- bool startsHumongous() const { return _type.is_starts_humongous(); }
- bool continuesHumongous() const { return _type.is_continues_humongous(); }
+ bool is_humongous() const { return _type.is_humongous(); }
+ bool is_starts_humongous() const { return _type.is_starts_humongous(); }
+ bool is_continues_humongous() const { return _type.is_continues_humongous(); }
bool is_old() const { return _type.is_old(); }
@@ -431,10 +431,10 @@
// Return the number of distinct regions that are covered by this region:
// 1 if the region is not humongous, >= 1 if the region is humongous.
uint region_num() const {
- if (!isHumongous()) {
+ if (!is_humongous()) {
return 1U;
} else {
- assert(startsHumongous(), "doesn't make sense on HC regions");
+ assert(is_starts_humongous(), "doesn't make sense on HC regions");
assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
}
@@ -443,7 +443,7 @@
  // Return the index + 1 of the last HC region that's associated
// with this HS region.
uint last_hc_index() const {
- assert(startsHumongous(), "don't call this otherwise");
+ assert(is_starts_humongous(), "don't call this otherwise");
return hrm_index() + region_num();
}
@@ -452,7 +452,7 @@
// their _end set up to be the end of the last continues region of the
// corresponding humongous object.
bool is_in_reserved_raw(const void* p) const {
- return _bottom <= p && p < _orig_end;
+ return _bottom <= p && p < orig_end();
}
// Makes the current region be a "starts humongous" region, i.e.,
@@ -478,12 +478,12 @@
// humongous regions can be calculated by just looking at the
// "starts humongous" regions and by ignoring the "continues
// humongous" regions.
- void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
+ void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
// Makes the current region be a "continues humongous'
// region. first_hr is the "start humongous" region of the series
// which this region will be part of.
- void set_continuesHumongous(HeapRegion* first_hr);
+ void set_continues_humongous(HeapRegion* first_hr);
// Unsets the humongous-related fields on the region.
void clear_humongous();
@@ -513,6 +513,14 @@
_next_in_special_set = r;
}
+ void set_allocation_context(AllocationContext_t context) {
+ _allocation_context = context;
+ }
+
+ AllocationContext_t allocation_context() const {
+ return _allocation_context;
+ }
+
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next and prev fields used to link regions into
@@ -556,7 +564,8 @@
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
- HeapWord* orig_end() const { return _orig_end; }
+  // For the start region of a humongous sequence, its original end().
+ HeapWord* orig_end() const { return _bottom + GrainWords; }
// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space, bool locked = false);
@@ -603,7 +612,7 @@
bool is_marked() { return _prev_top_at_mark_start != bottom(); }
void reset_during_compaction() {
- assert(isHumongous() && startsHumongous(),
+ assert(is_starts_humongous(),
"should only be called for starts humongous regions");
zero_marked_bytes();
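
Dropping the _orig_end field works because every HeapRegion's reserved span is exactly one grain, so the original end is always recomputable; the new assert in initialize() above checks exactly this invariant. Expressed as a free-standing helper, for illustration only:

    // For any region, humongous or not, the original end is bottom plus one grain.
    HeapWord* original_end_of(const HeapRegion* hr) {
      return hr->bottom() + HeapRegion::GrainWords;  // == hr->orig_end()
    }
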
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+
+class HeapRegionBounds : public AllStatic {
+private:
+ // Minimum region size; we won't go lower than that.
+ // We might want to decrease this in the future, to deal with small
+ // heaps a bit more efficiently.
+ static const size_t MIN_REGION_SIZE = 1024 * 1024;
+
+ // Maximum region size; we don't go higher than that. There's a good
+ // reason for having an upper bound. We don't want regions to get too
+  // large; otherwise cleanup's effectiveness would decrease, as there
+  // would be fewer opportunities to find totally empty regions after
+ // marking.
+ static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024;
+
+ // The automatic region size calculation will try to have around this
+ // many regions in the heap (based on the min heap size).
+ static const size_t TARGET_REGION_NUMBER = 2048;
+
+public:
+ static inline size_t min_size();
+ static inline size_t max_size();
+ static inline size_t target_number();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc_implementation/g1/heapRegionBounds.hpp"
+
+size_t HeapRegionBounds::min_size() {
+ return MIN_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::max_size() {
+ return MAX_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::target_number() {
+ return TARGET_REGION_NUMBER;
+}
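
With the former #defines now behind accessors, callers such as HeapRegion::setup_heap_region_size() clamp against HeapRegionBounds instead of raw constants. A minimal sketch of the clamping this enables, assuming HotSpot's MIN2/MAX2 helpers:

    size_t clamp_region_size(size_t proposed) {
      return MIN2(MAX2(proposed, HeapRegionBounds::min_size()),
                  HeapRegionBounds::max_size());
    }
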
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -66,10 +66,11 @@
#endif
HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
- HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
- return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
+ return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
}
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
@@ -281,7 +282,7 @@
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
- if (r->claim_value() == claim_value || r->continuesHumongous()) {
+ if (r->claim_value() == claim_value || r->is_continues_humongous()) {
continue;
}
// OK, try to claim it
@@ -289,7 +290,7 @@
continue;
}
// Success!
- if (r->startsHumongous()) {
+ if (r->is_starts_humongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
@@ -301,7 +302,7 @@
for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
HeapRegion* chr = _regions.get_by_index(ch_index);
- assert(chr->continuesHumongous(), "Must be humongous region");
+ assert(chr->is_continues_humongous(), "Must be humongous region");
assert(chr->humongous_start_region() == r,
err_msg("Must work on humongous continuation of the original start region "
PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
@@ -311,7 +312,7 @@
bool claim_result = chr->claimHeapRegion(claim_value);
// We should always be able to claim it; no one else should
// be trying to claim this region.
- guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+      guarantee(claim_result, "We should always be able to claim the continues humongous part of the humongous object");
bool res2 = blk->doHeapRegion(chr);
if (res2) {
@@ -322,7 +323,7 @@
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
- assert(chr->continuesHumongous(), "should still be the case");
+ assert(chr->is_continues_humongous(), "should still be the case");
assert(chr->humongous_start_region() == r, "sanity");
}
}
@@ -424,7 +425,7 @@
// this method may be called, we have only completed allocation of the regions,
// but not put into a region set.
prev_committed = true;
- if (hr->startsHumongous()) {
+ if (hr->is_starts_humongous()) {
prev_end = hr->orig_end();
} else {
prev_end = hr->end();
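
new_heap_region() now defers to the heap's G1Allocator, giving allocator subclasses a factory hook for context-aware HeapRegion variants. The base implementation presumably reduces to the old direct construction; a sketch, with the signature inferred from the call site above:

    HeapRegion* G1Allocator::new_heap_region(uint hrm_index,
                                             G1BlockOffsetSharedArray* sharedOffsetArray,
                                             MemRegion mr) {
      return new HeapRegion(hrm_index, sharedOffsetArray, mr);
    }
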
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -419,7 +419,7 @@
FromCardCache::print();
}
-void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
+void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uint cur_hrm_ind = hr()->hrm_index();
if (G1TraceHeapRegionRememberedSet) {
@@ -435,10 +435,10 @@
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
- FromCardCache::at((uint)tid, cur_hrm_ind));
+ FromCardCache::at(tid, cur_hrm_ind));
}
- if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
+ if (FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" from-card cache hit.");
}
@@ -493,7 +493,7 @@
return;
} else {
if (G1TraceHeapRegionRememberedSet) {
- gclog_or_tty->print_cr(" [tid %d] sparse table entry "
+ gclog_or_tty->print_cr(" [tid %u] sparse table entry "
"overflow(f: %d, t: %u)",
tid, from_hrm_ind, cur_hrm_ind);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -179,7 +179,7 @@
// For now. Could "expand" some tables in the future, so that this made
// sense.
- void add_reference(OopOrNarrowOopStar from, int tid);
+ void add_reference(OopOrNarrowOopStar from, uint tid);
// Removes any entries shown by the given bitmaps to contain only dead
// objects.
@@ -301,7 +301,7 @@
}
// Used in the parallel case.
- void add_reference(OopOrNarrowOopStar from, int tid) {
+ void add_reference(OopOrNarrowOopStar from, uint tid) {
_other_regions.add_reference(from, tid);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -41,7 +41,7 @@
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
- assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
+ assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -30,8 +30,8 @@
case FreeTag:
case EdenTag:
case SurvTag:
- case HumStartsTag:
- case HumContTag:
+ case StartsHumongousTag:
+ case ContinuesHumongousTag:
case OldTag:
return true;
}
@@ -41,12 +41,12 @@
const char* HeapRegionType::get_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
- case FreeTag: return "FREE";
- case EdenTag: return "EDEN";
- case SurvTag: return "SURV";
- case HumStartsTag: return "HUMS";
- case HumContTag: return "HUMC";
- case OldTag: return "OLD";
+ case FreeTag: return "FREE";
+ case EdenTag: return "EDEN";
+ case SurvTag: return "SURV";
+ case StartsHumongousTag: return "HUMS";
+ case ContinuesHumongousTag: return "HUMC";
+ case OldTag: return "OLD";
}
ShouldNotReachHere();
// keep some compilers happy
@@ -56,12 +56,12 @@
const char* HeapRegionType::get_short_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
- case FreeTag: return "F";
- case EdenTag: return "E";
- case SurvTag: return "S";
- case HumStartsTag: return "HS";
- case HumContTag: return "HC";
- case OldTag: return "O";
+ case FreeTag: return "F";
+ case EdenTag: return "E";
+ case SurvTag: return "S";
+ case StartsHumongousTag: return "HS";
+ case ContinuesHumongousTag: return "HC";
+ case OldTag: return "O";
}
ShouldNotReachHere();
// keep some compilers happy
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -49,22 +49,22 @@
// 0001 1 [ 3] Survivor
//
// 0010 0 Humongous Mask
- // 0010 0 [ 4] Humongous Starts
- // 0010 1 [ 5] Humongous Continues
+ // 0010 0 [ 4] Starts Humongous
+ // 0010 1 [ 5] Continues Humongous
//
// 01000 [ 8] Old
typedef enum {
- FreeTag = 0,
+ FreeTag = 0,
- YoungMask = 2,
- EdenTag = YoungMask,
- SurvTag = YoungMask + 1,
+ YoungMask = 2,
+ EdenTag = YoungMask,
+ SurvTag = YoungMask + 1,
- HumMask = 4,
- HumStartsTag = HumMask,
- HumContTag = HumMask + 1,
+ HumongousMask = 4,
+ StartsHumongousTag = HumongousMask,
+ ContinuesHumongousTag = HumongousMask + 1,
- OldTag = 8
+ OldTag = 8
} Tag;
volatile Tag _tag;
@@ -104,9 +104,9 @@
bool is_eden() const { return get() == EdenTag; }
bool is_survivor() const { return get() == SurvTag; }
- bool is_humongous() const { return (get() & HumMask) != 0; }
- bool is_starts_humongous() const { return get() == HumStartsTag; }
- bool is_continues_humongous() const { return get() == HumContTag; }
+ bool is_humongous() const { return (get() & HumongousMask) != 0; }
+ bool is_starts_humongous() const { return get() == StartsHumongousTag; }
+ bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
bool is_old() const { return get() == OldTag; }
@@ -118,8 +118,8 @@
void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
void set_survivor() { set_from(SurvTag, FreeTag); }
- void set_starts_humongous() { set_from(HumStartsTag, FreeTag); }
- void set_continues_humongous() { set_from(HumContTag, FreeTag); }
+ void set_starts_humongous() { set_from(StartsHumongousTag, FreeTag); }
+ void set_continues_humongous() { set_from(ContinuesHumongousTag, FreeTag); }
void set_old() { set(OldTag); }
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -45,11 +45,13 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
+ nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \
+ \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
- nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
+ nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \
\
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
@@ -72,14 +74,16 @@
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
- declare_toplevel_type(HeapRegionManager) \
+ declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
+ declare_toplevel_type(G1Allocator) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
+ declare_toplevel_type(G1Allocator*) \
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -45,7 +45,8 @@
void VM_G1CollectForAllocation::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, _gc_cause);
- _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+
+ _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
assert(_result == NULL || _pause_succeeded,
"if we get back a result, the pause should have succeeded");
}
@@ -99,7 +100,7 @@
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
- _result = g1h->attempt_allocation_at_safepoint(_word_size,
+ _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
@@ -152,7 +153,7 @@
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
- _result = g1h->attempt_allocation_at_safepoint(_word_size,
+ _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
+#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
// VM_operations for the G1 collector.
@@ -40,6 +41,7 @@
size_t _word_size;
HeapWord* _result;
bool _pause_succeeded;
+ AllocationContext_t _allocation_context;
public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
@@ -49,6 +51,8 @@
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; }
+ void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+ AllocationContext_t allocation_context() { return _allocation_context; }
};
class VM_G1CollectFull: public VM_GC_Operation {
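
VM_G1OperationWithAllocRequest now carries the requesting allocation context, which doit() forwards into attempt_allocation_at_safepoint() and satisfy_failed_allocation() in the hunks above. A hypothetical caller-side helper showing the intended flow; AllocationContext::system() is the default context this changeset uses elsewhere:

    #include "runtime/vmThread.hpp"

    static void submit_with_context(VM_G1OperationWithAllocRequest& op,
                                    AllocationContext_t context) {
      op.set_allocation_context(context);  // tag before the VM thread runs doit()
      VMThread::execute(&op);
    }
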
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -288,7 +288,7 @@
while (p < to) {
Prefetch::write(p, interval);
oop m = oop(p);
- assert(m->is_oop_or_null(), "check for header");
+ assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
m->push_contents(pm);
p += m->size();
}
@@ -296,7 +296,7 @@
} else {
while (p < to) {
oop m = oop(p);
- assert(m->is_oop_or_null(), "check for header");
+ assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
m->push_contents(pm);
p += m->size();
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -74,10 +74,9 @@
return JNI_ENOMEM;
}
- _reserved = MemRegion((HeapWord*)heap_rs.base(),
- (HeapWord*)(heap_rs.base() + heap_rs.size()));
+ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
- CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+ CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
barrier_set->initialize();
_barrier_set = barrier_set;
oopDesc::set_bs(_barrier_set);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -2882,7 +2882,7 @@
start_array->allocate_block(addr);
}
oop(addr)->update_contents(cm);
- assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+ assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
}
}
}
@@ -3366,7 +3366,7 @@
oop moved_oop = (oop) destination();
moved_oop->update_contents(compaction_manager());
- assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
+ assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));
update_state(words);
assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -582,6 +582,14 @@
}
}
+void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+ // It is important to do this in a way such that concurrent readers can't
+ // temporarily think something is in the heap. (Seen this happen in asserts.)
+ _reserved.set_word_size(0);
+ _reserved.set_start(start);
+ _reserved.set_end(end);
+}
+
/////////////// Unit tests ///////////////
#ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -85,6 +85,7 @@
friend class VMStructs;
friend class IsGCActiveMark; // Block structured external access to _is_gc_active
+ private:
#ifdef ASSERT
static int _fire_out_of_memory_count;
#endif
@@ -97,8 +98,9 @@
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
bool _defer_initial_card_mark;
+ MemRegion _reserved;
+
protected:
- MemRegion _reserved;
BarrierSet* _barrier_set;
bool _is_gc_active;
uint _n_par_threads;
@@ -211,6 +213,7 @@
   // Stop any ongoing concurrent work and prepare for exit.
virtual void stop() {}
+ void initialize_reserved_region(HeapWord *start, HeapWord *end);
MemRegion reserved_region() const { return _reserved; }
address base() const { return (address)reserved_region().start(); }
@@ -637,6 +640,15 @@
// actual number may be germane.
static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
+ // Copy the current allocation context statistics for the specified contexts.
+ // For each context in contexts, set the corresponding entries in the totals
+ // and accuracy arrays to the current values held by the statistics. Each
+ // array should be of length len.
+ virtual void copy_allocation_context_stats(const jint* contexts,
+ jlong* totals,
+ jbyte* accuracy,
+ jint len) { }
+
/////////////// Unit tests ///////////////
NOT_PRODUCT(static void test_is_in();)
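
copy_allocation_context_stats() is a no-op here and is meant to be overridden by heaps that actually track per-context statistics. A hedged sketch of such an override; the MyContextAwareHeap class and its ctx_stats() accessor are hypothetical and not part of this changeset:

    void MyContextAwareHeap::copy_allocation_context_stats(const jint* contexts,
                                                           jlong* totals,
                                                           jbyte* accuracy,
                                                           jint len) {
      for (jint i = 0; i < len; i++) {
        totals[i]   = ctx_stats(contexts[i]).total_bytes();  // hypothetical accessor
        accuracy[i] = ctx_stats(contexts[i]).accuracy();     // hypothetical accessor
      }
    }
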
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -54,6 +54,9 @@
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
+ case _update_allocation_context_stats:
+ return "Update Allocation Context Stats";
+
case _no_gc:
return "No GC";
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -47,6 +47,7 @@
_heap_inspection,
_heap_dump,
_wb_young_gc,
+ _update_allocation_context_stats,
/* implementation independent, but reserved for GC use */
_no_gc,
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -35,7 +35,7 @@
#ifdef ASSERT
#define VERIFY_OOP(o_) \
if (VerifyOops) { \
- assert((oop(o_))->is_oop_or_null(), "Not an oop!"); \
+ assert((oop(o_))->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(o_)))); \
StubRoutines::_verify_oop_count++; \
}
#else
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -123,17 +123,9 @@
return JNI_ENOMEM;
}
- _reserved = MemRegion((HeapWord*)heap_rs.base(),
- (HeapWord*)(heap_rs.base() + heap_rs.size()));
+ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
- // It is important to do this in a way such that concurrent readers can't
- // temporarily think something is in the heap. (Seen this happen in asserts.)
- _reserved.set_word_size(0);
- _reserved.set_start((HeapWord*)heap_rs.base());
- size_t actual_heap_size = heap_rs.size();
- _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
-
- _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
+ _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
set_barrier_set(rem_set()->bs());
_gch = this;
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -473,7 +473,7 @@
_discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
oop discovered = java_lang_ref_Reference::discovered(_ref);
assert(_discovered_addr && discovered->is_oop_or_null(),
- "discovered field is bad");
+ err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
_next = discovered;
_referent_addr = java_lang_ref_Reference::referent_addr(_ref);
_referent = java_lang_ref_Reference::referent(_ref);
@@ -482,7 +482,9 @@
assert(allow_null_referent ?
_referent->is_oop_or_null()
: _referent->is_oop(),
- "bad referent");
+ err_msg("Expected an oop%s for referent field at " PTR_FORMAT,
+ (allow_null_referent ? " or NULL" : ""),
+ p2i(_referent)));
}
void DiscoveredListIterator::remove() {
@@ -630,7 +632,7 @@
oop next = java_lang_ref_Reference::next(iter.obj());
if ((iter.referent() == NULL || iter.is_referent_alive() ||
next != NULL)) {
- assert(next->is_oop_or_null(), "bad next field");
+ assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
// Remove Reference object from list
iter.remove();
// Trace the cohorts
@@ -979,7 +981,7 @@
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
oop next = java_lang_ref_Reference::next(iter.obj());
- assert(next->is_oop_or_null(), "bad next field");
+ assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
// If referent has been cleared or Reference is not active,
// drop it.
if (iter.referent() == NULL || next != NULL) {
@@ -1172,7 +1174,7 @@
HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
const oop discovered = java_lang_ref_Reference::discovered(obj);
- assert(discovered->is_oop_or_null(), "bad discovered field");
+ assert(discovered->is_oop_or_null(), err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
if (discovered != NULL) {
// The reference has already been discovered...
if (TraceReferenceGC) {
--- a/hotspot/src/share/vm/memory/space.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/space.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -331,11 +331,10 @@
CompactibleSpace* space;
HeapWord* threshold;
- CompactPoint(Generation* _gen) :
- gen(_gen), space(NULL), threshold(0) {}
+ CompactPoint(Generation* g = NULL) :
+ gen(g), space(NULL), threshold(0) {}
};
-
// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -127,6 +127,8 @@
oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
+oop Universe::_allocation_context_notification_obj = NULL;
+
Method* Universe::_throw_illegal_access_error = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
@@ -196,6 +198,7 @@
f->do_oop((oop*)&_main_thread_group);
f->do_oop((oop*)&_system_thread_group);
f->do_oop((oop*)&_vm_exception);
+ f->do_oop((oop*)&_allocation_context_notification_obj);
debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}
--- a/hotspot/src/share/vm/memory/universe.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/universe.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -178,6 +178,8 @@
// the vm thread.
static oop _vm_exception;
+ static oop _allocation_context_notification_obj;
+
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
@@ -307,6 +309,10 @@
static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; }
static oop vm_exception() { return _vm_exception; }
+
+ static inline oop allocation_context_notification_obj();
+ static inline void set_allocation_context_notification_obj(oop obj);
+
static Method* throw_illegal_access_error() { return _throw_illegal_access_error; }
static Array<int>* the_empty_int_array() { return _the_empty_int_array; }
--- a/hotspot/src/share/vm/memory/universe.inline.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/universe.inline.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -41,4 +41,12 @@
return type == T_DOUBLE || type == T_LONG;
}
+inline oop Universe::allocation_context_notification_obj() {
+ return _allocation_context_notification_obj;
+}
+
+inline void Universe::set_allocation_context_notification_obj(oop obj) {
+ _allocation_context_notification_obj = obj;
+}
+
#endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
--- a/hotspot/src/share/vm/prims/whitebox.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -257,7 +257,7 @@
G1CollectedHeap* g1 = G1CollectedHeap::heap();
oop result = JNIHandles::resolve(obj);
const HeapRegion* hr = g1->heap_region_containing(result);
- return hr->isHumongous();
+ return hr->is_humongous();
WB_END
WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
@@ -713,6 +713,12 @@
WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
Universe::heap()->collect(GCCause::_last_ditch_collection);
+#if INCLUDE_ALL_GCS
+ if (UseG1GC) {
+ // Needs to be cleared explicitly for G1
+ Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(false);
+ }
+#endif // INCLUDE_ALL_GCS
WB_END
WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
@@ -864,6 +870,36 @@
return ret;
}
+void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, JNINativeMethod* method_array, int method_count) {
+ ResourceMark rm;
+ ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+
+  // Register the natives one by one, so that an exception can be caught per method.
+ jclass no_such_method_error_klass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+ CHECK_JNI_EXCEPTION(env);
+ for (int i = 0, n = method_count; i < n; ++i) {
+ // Skip dummy entries
+ if (method_array[i].fnPtr == NULL) continue;
+ if (env->RegisterNatives(wbclass, &method_array[i], 1) != 0) {
+ jthrowable throwable_obj = env->ExceptionOccurred();
+ if (throwable_obj != NULL) {
+ env->ExceptionClear();
+ if (env->IsInstanceOf(throwable_obj, no_such_method_error_klass)) {
+ // NoSuchMethodError is thrown when a method can't be found or a method is not native.
+ // Ignoring the exception since it is not preventing use of other WhiteBox methods.
+ tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s",
+ method_array[i].name, method_array[i].signature);
+ }
+ } else {
+ // Registration failed unexpectedly.
+ tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered",
+ method_array[i].name, method_array[i].signature);
+ env->UnregisterNatives(wbclass);
+ break;
+ }
+ }
+ }
+}
#define CC (char*)
@@ -971,35 +1007,9 @@
instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
Handle loader(ikh->class_loader());
if (loader.is_null()) {
- ResourceMark rm;
- ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
- bool result = true;
- // one by one registration natives for exception catching
- jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
- CHECK_JNI_EXCEPTION(env);
- for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
- if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
- result = false;
- jthrowable throwable_obj = env->ExceptionOccurred();
- if (throwable_obj != NULL) {
- env->ExceptionClear();
- if (env->IsInstanceOf(throwable_obj, exceptionKlass)) {
- // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
- // ignoring the exception
- tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
- }
- } else {
- // register is failed w/o exception or w/ unexpected exception
- tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
- env->UnregisterNatives(wbclass);
- break;
- }
- }
- }
-
- if (result) {
- WhiteBox::set_used();
- }
+ WhiteBox::register_methods(env, wbclass, thread, methods, sizeof(methods) / sizeof(methods[0]));
+ WhiteBox::register_extended(env, wbclass, thread);
+ WhiteBox::set_used();
}
}
}
--- a/hotspot/src/share/vm/prims/whitebox.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -29,6 +29,8 @@
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/interfaceSupport.hpp"
// Entry macro to transition from JNI to VM state.
@@ -64,6 +66,9 @@
static bool lookup_bool(const char* field_name, oop object);
static int array_bytes_to_length(size_t bytes);
+ static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
+ JNINativeMethod* method_array, int method_count);
+ static void register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread);
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/whitebox_ext.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "prims/whitebox.hpp"
+
+void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) { }
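
register_extended() is the extension twin of the registration loop factored into WhiteBox::register_methods() above: the default build registers nothing, while an extended build can supply its own method table. A hypothetical non-empty version; the method name, signature, and entry point are invented for illustration:

    #include "precompiled.hpp"
    #include "prims/whitebox.hpp"

    #define CC (char*)

    WB_ENTRY(jlong, WB_MyExtendedProbe(JNIEnv* env, jobject o))
      return 0;  // placeholder body
    WB_END

    static JNINativeMethod extended_methods[] = {
      {CC"myExtendedProbe", CC"()J", (void*)&WB_MyExtendedProbe},
    };

    void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) {
      WhiteBox::register_methods(env, wbclass, thread, extended_methods,
                                 sizeof(extended_methods) / sizeof(extended_methods[0]));
    }
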
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -36,6 +36,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/arguments_ext.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -88,6 +89,8 @@
bool Arguments::_has_profile = false;
size_t Arguments::_conservative_max_heap_alignment = 0;
uintx Arguments::_min_heap_size = 0;
+uintx Arguments::_min_heap_free_ratio = 0;
+uintx Arguments::_max_heap_free_ratio = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
@@ -1581,24 +1584,25 @@
CollectorPolicy::compute_heap_alignment());
}
-void Arguments::set_ergonomics_flags() {
-
+void Arguments::select_gc_ergonomically() {
if (os::is_server_class_machine()) {
- // If no other collector is requested explicitly,
- // let the VM select the collector based on
- // machine class and automatic selection policy.
- if (!UseSerialGC &&
- !UseConcMarkSweepGC &&
- !UseG1GC &&
- !UseParNewGC &&
- FLAG_IS_DEFAULT(UseParallelGC)) {
- if (should_auto_select_low_pause_collector()) {
- FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
- } else {
- FLAG_SET_ERGO(bool, UseParallelGC, true);
- }
+ if (should_auto_select_low_pause_collector()) {
+ FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+ } else {
+ FLAG_SET_ERGO(bool, UseParallelGC, true);
}
}
+}
+
+void Arguments::select_gc() {
+ if (!gc_selected()) {
+ ArgumentsExt::select_gc_ergonomically();
+ }
+}
+
+void Arguments::set_ergonomics_flags() {
+ select_gc();
+
#ifdef COMPILER2
// Shared spaces work fine with other GCs but causes bytecode rewriting
// to be disabled, which hurts interpreter performance and decreases
@@ -1650,9 +1654,11 @@
// unless the user actually sets these flags.
if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+ _min_heap_free_ratio = MinHeapFreeRatio;
}
if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+ _max_heap_free_ratio = MaxHeapFreeRatio;
}
}
@@ -1716,6 +1722,46 @@
}
}
+#if !INCLUDE_ALL_GCS
+#ifdef ASSERT
+static bool verify_serial_gc_flags() {
+ return (UseSerialGC &&
+ !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+ UseParallelGC || UseParallelOldGC));
+}
+#endif // ASSERT
+#endif // INCLUDE_ALL_GCS
+
+void Arguments::set_gc_specific_flags() {
+#if INCLUDE_ALL_GCS
+ // Set per-collector flags
+ if (UseParallelGC || UseParallelOldGC) {
+ set_parallel_gc_flags();
+ } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
+ set_cms_and_parnew_gc_flags();
+ } else if (UseParNewGC) { // Skipped if CMS is set above
+ set_parnew_gc_flags();
+ } else if (UseG1GC) {
+ set_g1_gc_flags();
+ }
+ check_deprecated_gcs();
+ check_deprecated_gc_flags();
+ if (AssumeMP && !UseSerialGC) {
+ if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+ warning("If the number of processors is expected to increase from one, then"
+ " you should configure the number of parallel GC threads appropriately"
+ " using -XX:ParallelGCThreads=N");
+ }
+ }
+ if (MinHeapFreeRatio == 100) {
+ // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+ FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+ }
+#else // INCLUDE_ALL_GCS
+ assert(verify_serial_gc_flags(), "SerialGC unset");
+#endif // INCLUDE_ALL_GCS
+}
+
julong Arguments::limit_by_allocatable_memory(julong limit) {
julong max_allocatable;
julong result = limit;
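
[Editor's note] set_gc_specific_flags() is code motion: its body is lifted verbatim from the argument-processing path (see the matching removal further down in this file). The if/else chain encodes a precedence, notably that CMS is checked before ParNew so the combined flags take the CMS path. A standalone model of the dispatch (illustrative only):

    enum GCFlagPath { PATH_PARALLEL, PATH_CMS_PARNEW, PATH_PARNEW, PATH_G1, PATH_NONE };

    static GCFlagPath gc_flag_path(bool parallel, bool parallel_old,
                                   bool cms, bool parnew, bool g1) {
      if (parallel || parallel_old) return PATH_PARALLEL;   // set_parallel_gc_flags()
      if (cms)                      return PATH_CMS_PARNEW; // checked before ParNew on purpose
      if (parnew)                   return PATH_PARNEW;     // reached only when CMS is off
      if (g1)                       return PATH_G1;         // set_g1_gc_flags()
      return PATH_NONE;                                     // serial: no per-collector setup
    }
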
@@ -1957,16 +2003,6 @@
return false;
}
-#if !INCLUDE_ALL_GCS
-#ifdef ASSERT
-static bool verify_serial_gc_flags() {
- return (UseSerialGC &&
- !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
- UseParallelGC || UseParallelOldGC));
-}
-#endif // ASSERT
-#endif // INCLUDE_ALL_GCS
-
// check whether to do gclog rotation
// +UseGCLogFileRotation is a must,
// no gc log rotation when log file not supplied or
@@ -2045,6 +2081,8 @@
MaxHeapFreeRatio);
return false;
}
+ // This does not set the flag itself, but stores the value in a safe place for later usage.
+ _min_heap_free_ratio = min_heap_free_ratio;
return true;
}
@@ -2059,11 +2097,13 @@
MinHeapFreeRatio);
return false;
}
+ // This does not set the flag itself, but stores the value in a safe place for later usage.
+ _max_heap_free_ratio = max_heap_free_ratio;
return true;
}
// Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
+bool Arguments::check_gc_consistency_user() {
check_gclog_consistency();
bool status = true;
// Ensure that the user has not selected conflicting sets
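
[Editor's note] Beyond the rename to check_gc_consistency_user(), the verify_* routines now also record the accepted value in _min_heap_free_ratio/_max_heap_free_ratio, so the user-supplied ratios survive later ergonomic rewrites (such as the 0/100 defaults applied for parallel GC in the earlier hunk). The stored values are read back through the accessors added in arguments.hpp; a usage sketch:

    // Sketch: recovering the pre-ergonomics values after argument processing.
    uintx user_min = Arguments::min_heap_free_ratio(); // as given via -XX:MinHeapFreeRatio
    uintx user_max = Arguments::max_heap_free_ratio(); // as given via -XX:MaxHeapFreeRatio
    // The MinHeapFreeRatio/MaxHeapFreeRatio globals themselves may have been overwritten.
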
@@ -2222,7 +2262,7 @@
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
}
- status = status && check_gc_consistency();
+ status = status && ArgumentsExt::check_gc_consistency_user();
status = status && check_stack_pages();
if (CMSIncrementalMode) {
@@ -2491,8 +2531,6 @@
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}
- status &= check_vm_args_consistency_ext();
-
return status;
}
@@ -3508,7 +3546,7 @@
}
}
- if (!check_vm_args_consistency()) {
+ if (!ArgumentsExt::check_vm_args_consistency()) {
return JNI_ERR;
}
@@ -3864,7 +3902,7 @@
set_shared_spaces_flags();
// Check the GC selections again.
- if (!check_gc_consistency()) {
+ if (!ArgumentsExt::check_gc_consistency_ergo()) {
return JNI_EINVAL;
}
@@ -3886,33 +3924,7 @@
// Set heap size based on available physical memory
set_heap_size();
-#if INCLUDE_ALL_GCS
- // Set per-collector flags
- if (UseParallelGC || UseParallelOldGC) {
- set_parallel_gc_flags();
- } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
- set_cms_and_parnew_gc_flags();
- } else if (UseParNewGC) { // Skipped if CMS is set above
- set_parnew_gc_flags();
- } else if (UseG1GC) {
- set_g1_gc_flags();
- }
- check_deprecated_gcs();
- check_deprecated_gc_flags();
- if (AssumeMP && !UseSerialGC) {
- if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
- warning("If the number of processors is expected to increase from one, then"
- " you should configure the number of parallel GC threads appropriately"
- " using -XX:ParallelGCThreads=N");
- }
- }
- if (MinHeapFreeRatio == 100) {
- // Keeping the heap 100% free is hard ;-) so limit it to 99%.
- FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
- }
-#else // INCLUDE_ALL_GCS
- assert(verify_serial_gc_flags(), "SerialGC unset");
-#endif // INCLUDE_ALL_GCS
+ set_gc_specific_flags();
// Initialize Metaspace flags and alignments
Metaspace::ergo_initialize();
--- a/hotspot/src/share/vm/runtime/arguments.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -285,7 +285,11 @@
// Value of the conservative maximum heap alignment needed
static size_t _conservative_max_heap_alignment;
- static uintx _min_heap_size;
+ static uintx _min_heap_size;
+
+ // Used to store original flag values
+ static uintx _min_heap_free_ratio;
+ static uintx _max_heap_free_ratio;
// -Xrun arguments
static AgentLibraryList _libraryList;
@@ -336,8 +340,10 @@
static void set_conservative_max_heap_alignment();
static void set_use_compressed_oops();
static void set_use_compressed_klass_ptrs();
+ static void select_gc();
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
+ static void set_gc_specific_flags();
// limits the given memory size by the maximum amount of memory this process is
// currently allowed to allocate or reserve.
static julong limit_by_allocatable_memory(julong size);
@@ -449,6 +455,9 @@
// Adjusts the arguments after the OS have adjusted the arguments
static jint adjust_after_os();
+ static inline bool gc_selected(); // whether a gc has been selected
+ static void select_gc_ergonomically();
+
// Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error
// message is returned in the provided buffer.
static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio);
@@ -458,12 +467,12 @@
static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
// Check for consistency in the selection of the garbage collector.
- static bool check_gc_consistency();
+ static bool check_gc_consistency_user(); // Check user-selected gc
+ static inline bool check_gc_consistency_ergo(); // Check ergonomically selected gc
static void check_deprecated_gcs();
static void check_deprecated_gc_flags();
// Check consistency or otherwise of VM argument settings
static bool check_vm_args_consistency();
- static bool check_vm_args_consistency_ext();
// Check stack pages settings
static bool check_stack_pages();
// Used by os_solaris
@@ -516,6 +525,10 @@
static uintx min_heap_size() { return _min_heap_size; }
static void set_min_heap_size(uintx v) { _min_heap_size = v; }
+ // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
+ static uintx min_heap_free_ratio() { return _min_heap_free_ratio; }
+ static uintx max_heap_free_ratio() { return _max_heap_free_ratio; }
+
// -Xrun
static AgentLibrary* libraries() { return _libraryList.first(); }
static bool init_libraries_at_startup() { return !_libraryList.is_empty(); }
@@ -598,4 +611,13 @@
static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen);
};
+bool Arguments::gc_selected() {
+ return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC ||
+ UseParNewGC || UseSerialGC;
+}
+
+bool Arguments::check_gc_consistency_ergo() {
+ return check_gc_consistency_user();
+}
+
#endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP
--- a/hotspot/src/share/vm/runtime/arguments_ext.cpp Thu Sep 25 23:37:41 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/arguments.hpp"
-
-bool Arguments::check_vm_args_consistency_ext() {
- return true;
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/arguments_ext.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+#define SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/arguments.hpp"
+
+class ArgumentsExt: AllStatic {
+public:
+ static inline void select_gc_ergonomically();
+ static inline bool check_gc_consistency_user();
+ static inline bool check_gc_consistency_ergo();
+ static inline bool check_vm_args_consistency();
+};
+
+void ArgumentsExt::select_gc_ergonomically() {
+ Arguments::select_gc_ergonomically();
+}
+
+bool ArgumentsExt::check_gc_consistency_user() {
+ return Arguments::check_gc_consistency_user();
+}
+
+bool ArgumentsExt::check_gc_consistency_ergo() {
+ return Arguments::check_gc_consistency_ergo();
+}
+
+bool ArgumentsExt::check_vm_args_consistency() {
+ return Arguments::check_vm_args_consistency();
+}
+
+#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
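
[Editor's note] arguments_ext.hpp replaces the narrower check_vm_args_consistency_ext() hook (the arguments_ext.cpp deleted just above) with a broader seam: an AllStatic shim whose inline members simply forward to Arguments in the open build. A closed variant ships its own copy of this header. A hypothetical override that forces a different ergonomic default might look like this (illustrative only; not part of this changeset):

    void ArgumentsExt::select_gc_ergonomically() {
      // Hypothetical vendor policy: always default to G1 instead of the
      // CMS/ParallelGC choice made by Arguments::select_gc_ergonomically().
      FLAG_SET_ERGO(bool, UseG1GC, true);
    }
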
--- a/hotspot/src/share/vm/runtime/serviceThread.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/serviceThread.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -29,6 +29,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "prims/jvmtiImpl.hpp"
+#include "services/allocationContextService.hpp"
#include "services/gcNotifier.hpp"
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
@@ -86,6 +87,7 @@
bool has_jvmti_events = false;
bool has_gc_notification_event = false;
bool has_dcmd_notification_event = false;
+ bool acs_notify = false;
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
@@ -102,7 +104,8 @@
while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
!(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
!(has_gc_notification_event = GCNotifier::has_event()) &&
- !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) {
+ !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
+ !(acs_notify = AllocationContextService::should_notify())) {
// wait until one of the sensors has pending requests, or there is a
// pending JVMTI event or JMX GC notification to post
Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -128,6 +131,10 @@
if(has_dcmd_notification_event) {
DCmdFactory::send_notification(CHECK);
}
+
+ if (acs_notify) {
+ AllocationContextService::notify(CHECK);
+ }
}
}
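
[Editor's note] AllocationContextService joins the service thread's wait predicate the same way as the existing producers: a producer records its pending state and signals Service_lock, and the loop above re-evaluates all five conditions on wakeup. The producer half of the handshake follows this pattern (a sketch modeled on GCNotifier; _should_notify is an assumed field, and the open-source should_notify() stub further down never fires):

    {
      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
      _should_notify = true;
      Service_lock->notify_all();   // wakes the service thread's wait() above
    }
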
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -826,6 +826,7 @@
st->print("os_prio=%d ", os_prio);
}
st->print("tid=" INTPTR_FORMAT " ", this);
+ ext().print_on(st);
osthread()->print_on(st);
}
debug_only(if (WizardMode) print_owned_locks_on(st);)
@@ -2964,6 +2965,8 @@
// Push the Java priority down to the native thread; needs Threads_lock
Thread::set_priority(this, prio);
+ prepare_ext();
+
// Add the new thread to the Threads list and set it in motion.
// We must have threads lock in order to call Threads::add.
// It is crucial that we do not block before the thread is
@@ -3795,6 +3798,24 @@
}
}
+JavaThread* Threads::find_java_thread_from_java_tid(jlong java_tid) {
+ assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
+
+ JavaThread* java_thread = NULL;
+ // Sequential search for now. Need to do better optimization later.
+ for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ oop tobj = thread->threadObj();
+ if (!thread->is_exiting() &&
+ tobj != NULL &&
+ java_tid == java_lang_Thread::thread_id(tobj)) {
+ java_thread = thread;
+ break;
+ }
+ }
+ return java_thread;
+}
+
+
// Last thread running calls java.lang.Shutdown.shutdown()
void JavaThread::invoke_shutdown_hooks() {
HandleMark hm(this);
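
[Editor's note] find_java_thread_from_java_tid() is the helper formerly private to management.cpp (removed there later in this patch), promoted to Threads so other subsystems can map a java.lang.Thread ID to its JavaThread. Callers must hold Threads_lock for as long as they use the result; a usage sketch:

    // Sketch: resolving a thread ID while holding Threads_lock.
    MutexLockerEx ml(Threads_lock);
    JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
    if (jt != NULL) {
      jlong allocated = jt->cooked_allocated_bytes(); // as jmm_GetThreadAllocatedMemory does
    }
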
--- a/hotspot/src/share/vm/runtime/thread.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -40,6 +40,7 @@
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadLocalStorage.hpp"
+#include "runtime/thread_ext.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/macros.hpp"
@@ -256,6 +257,8 @@
TRACE_DATA _trace_data; // Thread-local data for tracing
+ ThreadExt _ext;
+
int _vm_operation_started_count; // VM_Operation support
int _vm_operation_completed_count; // VM_Operation support
@@ -409,6 +412,9 @@
TRACE_DATA* trace_data() { return &_trace_data; }
+ const ThreadExt& ext() const { return _ext; }
+ ThreadExt& ext() { return _ext; }
+
// VM operation support
int vm_operation_ticket() { return ++_vm_operation_started_count; }
int vm_operation_completed_count() { return _vm_operation_completed_count; }
@@ -978,6 +984,7 @@
// not specified, use the priority of the thread object. Threads_lock
// must be held while this function is called.
void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
+ void prepare_ext();
void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
address saved_exception_pc() { return _saved_exception_pc; }
@@ -1910,6 +1917,8 @@
// Deoptimizes all frames tied to marked nmethods
static void deoptimized_wrt_marked_nmethods();
+ static JavaThread* find_java_thread_from_java_tid(jlong java_tid);
+
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/thread_ext.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/thread_ext.hpp"
+
+void JavaThread::prepare_ext() {
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/thread_ext.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREAD_EXT_HPP
+#define SHARE_VM_RUNTIME_THREAD_EXT_HPP
+
+#include "memory/allocation.hpp"
+
+class ThreadExt VALUE_OBJ_CLASS_SPEC {
+public:
+ void print_on(outputStream* st) const {};
+};
+
+#endif // SHARE_VM_RUNTIME_THREAD_EXT_HPP
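
[Editor's note] ThreadExt is the per-thread counterpart of the other _ext seams: every Thread embeds one by value (the _ext field added in thread.hpp above), and the open-source class is an empty value object whose print_on() contributes nothing to Thread::print_on(). A hypothetical closed-source version carrying, say, an allocation-context tag might look like this (illustrative; no such field exists in this changeset):

    class ThreadExt VALUE_OBJ_CLASS_SPEC {
    private:
      int _context_id;   // hypothetical per-thread state
    public:
      ThreadExt() : _context_id(0) {}
      void print_on(outputStream* st) const {
        st->print("ctx=%d ", _context_id);  // surfaces in Thread::print_on() via ext().print_on(st)
      }
    };
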
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -68,6 +68,7 @@
template(G1CollectFull) \
template(G1CollectForAllocation) \
template(G1IncCollectionPause) \
+ template(DestroyAllocationContext) \
template(EnableBiasedLocking) \
template(RevokeBias) \
template(BulkRevokeBias) \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/allocationContextService.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+
+#include "utilities/exceptions.hpp"
+
+class AllocationContextService: public AllStatic {
+public:
+ static inline bool should_notify();
+ static inline void notify(TRAPS);
+};
+
+bool AllocationContextService::should_notify() { return false; }
+void AllocationContextService::notify(TRAPS) { }
+
+#endif // SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
--- a/hotspot/src/share/vm/services/heapDumper.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/services/heapDumper.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -722,7 +722,7 @@
// reflection and sun.misc.Unsafe classes may have a reference to a
// Klass* so filter it out.
- assert(o->is_oop_or_null(), "should always be an oop");
+ assert(o->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(o)));
writer->write_objectID(o);
break;
}
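
[Editor's note] This assertion change (and the identical one in taskqueue.hpp below) upgrades a fixed-string assert to err_msg(), which builds a FormatBuffer so a failure reports the offending address rather than just "should always be an oop". The technique applies to any assert that benefits from context; a sketch where idx and _length are placeholders:

    assert(idx < _length,
           err_msg("index " SIZE_FORMAT " out of bounds " SIZE_FORMAT, idx, _length));
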
--- a/hotspot/src/share/vm/services/management.cpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/services/management.cpp Fri Sep 26 06:07:48 2014 +0000
@@ -392,23 +392,6 @@
return (instanceOop) element();
}
-// Helper functions
-static JavaThread* find_java_thread_from_id(jlong thread_id) {
- assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
-
- JavaThread* java_thread = NULL;
- // Sequential search for now. Need to do better optimization later.
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
- oop tobj = thread->threadObj();
- if (!thread->is_exiting() &&
- tobj != NULL &&
- thread_id == java_lang_Thread::thread_id(tobj)) {
- java_thread = thread;
- break;
- }
- }
- return java_thread;
-}
static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) {
if (mgr == NULL) {
@@ -445,6 +428,8 @@
return MemoryService::get_memory_pool(ph);
}
+#endif // INCLUDE_MANAGEMENT
+
static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
int num_threads = ids_ah->length();
@@ -460,6 +445,8 @@
}
}
+#if INCLUDE_MANAGEMENT
+
static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
// check if the element of infoArray is of type ThreadInfo class
Klass* threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
@@ -823,45 +810,6 @@
return prev;
JVM_END
-// Gets an array containing the amount of memory allocated on the Java
-// heap for a set of threads (in bytes). Each element of the array is
-// the amount of memory allocated for the thread ID specified in the
-// corresponding entry in the given array of thread IDs; or -1 if the
-// thread does not exist or has terminated.
-JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
- jlongArray sizeArray))
- // Check if threads is null
- if (ids == NULL || sizeArray == NULL) {
- THROW(vmSymbols::java_lang_NullPointerException());
- }
-
- ResourceMark rm(THREAD);
- typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
- typeArrayHandle ids_ah(THREAD, ta);
-
- typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
- typeArrayHandle sizeArray_h(THREAD, sa);
-
- // validate the thread id array
- validate_thread_id_array(ids_ah, CHECK);
-
- // sizeArray must be of the same length as the given array of thread IDs
- int num_threads = ids_ah->length();
- if (num_threads != sizeArray_h->length()) {
- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
- "The length of the given long array does not match the length of "
- "the given array of thread IDs");
- }
-
- MutexLockerEx ml(Threads_lock);
- for (int i = 0; i < num_threads; i++) {
- JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
- if (java_thread != NULL) {
- sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
- }
- }
-JVM_END
-
// Returns a java/lang/management/MemoryUsage object representing
// the memory usage for the heap or non-heap memory.
JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
@@ -1167,7 +1115,7 @@
MutexLockerEx ml(Threads_lock);
for (int i = 0; i < num_threads; i++) {
jlong tid = ids_ah->long_at(i);
- JavaThread* jt = find_java_thread_from_id(tid);
+ JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
thread_handle_array->append(threadObj_h);
@@ -1244,7 +1192,7 @@
MutexLockerEx ml(Threads_lock);
for (int i = 0; i < num_threads; i++) {
jlong tid = ids_ah->long_at(i);
- JavaThread* jt = find_java_thread_from_id(tid);
+ JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
ThreadSnapshot* ts;
if (jt == NULL) {
// if the thread does not exist or now it is terminated,
@@ -1488,7 +1436,7 @@
}
} else {
// reset contention statistics for a given thread
- JavaThread* java_thread = find_java_thread_from_id(tid);
+ JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid);
if (java_thread == NULL) {
return false;
}
@@ -1557,7 +1505,7 @@
return os::current_thread_cpu_time();
} else {
MutexLockerEx ml(Threads_lock);
- java_thread = find_java_thread_from_id(thread_id);
+ java_thread = Threads::find_java_thread_from_java_tid(thread_id);
if (java_thread != NULL) {
return os::thread_cpu_time((Thread*) java_thread);
}
@@ -1565,78 +1513,6 @@
return -1;
JVM_END
-// Returns the CPU time consumed by a given thread (in nanoseconds).
-// If thread_id == 0, CPU time for the current thread is returned.
-// If user_sys_cpu_time = true, user level and system CPU time of
-// a given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
- if (!os::is_thread_cpu_time_supported()) {
- return -1;
- }
-
- if (thread_id < 0) {
- THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
- "Invalid thread ID", -1);
- }
-
- JavaThread* java_thread = NULL;
- if (thread_id == 0) {
- // current thread
- return os::current_thread_cpu_time(user_sys_cpu_time != 0);
- } else {
- MutexLockerEx ml(Threads_lock);
- java_thread = find_java_thread_from_id(thread_id);
- if (java_thread != NULL) {
- return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
- }
- }
- return -1;
-JVM_END
-
-// Gets an array containing the CPU times consumed by a set of threads
-// (in nanoseconds). Each element of the array is the CPU time for the
-// thread ID specified in the corresponding entry in the given array
-// of thread IDs; or -1 if the thread does not exist or has terminated.
-// If user_sys_cpu_time = true, the sum of user level and system CPU time
-// for the given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
- jlongArray timeArray,
- jboolean user_sys_cpu_time))
- // Check if threads is null
- if (ids == NULL || timeArray == NULL) {
- THROW(vmSymbols::java_lang_NullPointerException());
- }
-
- ResourceMark rm(THREAD);
- typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
- typeArrayHandle ids_ah(THREAD, ta);
-
- typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
- typeArrayHandle timeArray_h(THREAD, tia);
-
- // validate the thread id array
- validate_thread_id_array(ids_ah, CHECK);
-
- // timeArray must be of the same length as the given array of thread IDs
- int num_threads = ids_ah->length();
- if (num_threads != timeArray_h->length()) {
- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
- "The length of the given long array does not match the length of "
- "the given array of thread IDs");
- }
-
- MutexLockerEx ml(Threads_lock);
- for (int i = 0; i < num_threads; i++) {
- JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
- if (java_thread != NULL) {
- timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
- user_sys_cpu_time != 0));
- }
- }
-JVM_END
-
// Returns a String array of all VM global flag names
JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
// last flag entry is always NULL, so subtract 1
@@ -2331,7 +2207,122 @@
return (jlong)(((double)ticks / (double)os::elapsed_frequency())
* (double)1000.0);
}
+#endif // INCLUDE_MANAGEMENT
+// Gets an array containing the amount of memory allocated on the Java
+// heap for a set of threads (in bytes). Each element of the array is
+// the amount of memory allocated for the thread ID specified in the
+// corresponding entry in the given array of thread IDs; or -1 if the
+// thread does not exist or has terminated.
+JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
+ jlongArray sizeArray))
+ // Check if threads is null
+ if (ids == NULL || sizeArray == NULL) {
+ THROW(vmSymbols::java_lang_NullPointerException());
+ }
+
+ ResourceMark rm(THREAD);
+ typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+ typeArrayHandle ids_ah(THREAD, ta);
+
+ typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
+ typeArrayHandle sizeArray_h(THREAD, sa);
+
+ // validate the thread id array
+ validate_thread_id_array(ids_ah, CHECK);
+
+ // sizeArray must be of the same length as the given array of thread IDs
+ int num_threads = ids_ah->length();
+ if (num_threads != sizeArray_h->length()) {
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+ "The length of the given long array does not match the length of "
+ "the given array of thread IDs");
+ }
+
+ MutexLockerEx ml(Threads_lock);
+ for (int i = 0; i < num_threads; i++) {
+ JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+ if (java_thread != NULL) {
+ sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
+ }
+ }
+JVM_END
+
+// Returns the CPU time consumed by a given thread (in nanoseconds).
+// If thread_id == 0, CPU time for the current thread is returned.
+// If user_sys_cpu_time = true, user level and system CPU time of
+// a given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
+ if (!os::is_thread_cpu_time_supported()) {
+ return -1;
+ }
+
+ if (thread_id < 0) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+ "Invalid thread ID", -1);
+ }
+
+ JavaThread* java_thread = NULL;
+ if (thread_id == 0) {
+ // current thread
+ return os::current_thread_cpu_time(user_sys_cpu_time != 0);
+ } else {
+ MutexLockerEx ml(Threads_lock);
+ java_thread = Threads::find_java_thread_from_java_tid(thread_id);
+ if (java_thread != NULL) {
+ return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
+ }
+ }
+ return -1;
+JVM_END
+
+// Gets an array containing the CPU times consumed by a set of threads
+// (in nanoseconds). Each element of the array is the CPU time for the
+// thread ID specified in the corresponding entry in the given array
+// of thread IDs; or -1 if the thread does not exist or has terminated.
+// If user_sys_cpu_time = true, the sum of user level and system CPU time
+// for the given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
+ jlongArray timeArray,
+ jboolean user_sys_cpu_time))
+ // Check if threads is null
+ if (ids == NULL || timeArray == NULL) {
+ THROW(vmSymbols::java_lang_NullPointerException());
+ }
+
+ ResourceMark rm(THREAD);
+ typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+ typeArrayHandle ids_ah(THREAD, ta);
+
+ typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
+ typeArrayHandle timeArray_h(THREAD, tia);
+
+ // validate the thread id array
+ validate_thread_id_array(ids_ah, CHECK);
+
+ // timeArray must be of the same length as the given array of thread IDs
+ int num_threads = ids_ah->length();
+ if (num_threads != timeArray_h->length()) {
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+ "The length of the given long array does not match the length of "
+ "the given array of thread IDs");
+ }
+
+ MutexLockerEx ml(Threads_lock);
+ for (int i = 0; i < num_threads; i++) {
+ JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+ if (java_thread != NULL) {
+ timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
+ user_sys_cpu_time != 0));
+ }
+ }
+JVM_END
+
+
+
+#if INCLUDE_MANAGEMENT
const struct jmmInterface_1_ jmm_interface = {
NULL,
NULL,
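
[Editor's note] The management.cpp churn is a relocation, not a behavior change: validate_thread_id_array() plus the three thread memory/CPU entry points move outside the #if INCLUDE_MANAGEMENT region (and switch to Threads::find_java_thread_from_java_tid), so they compile even in a minimal VM, while the rest of the JMM interface stays guarded. The resulting file layout, in outline:

    #if INCLUDE_MANAGEMENT
      // ... bulk of management.cpp ...
    #endif // INCLUDE_MANAGEMENT
      // always compiled:
      //   validate_thread_id_array()
      //   jmm_GetThreadAllocatedMemory(), jmm_GetThreadCpuTimeWithKind(),
      //   jmm_GetThreadCpuTimesWithKind()
    #if INCLUDE_MANAGEMENT
      const struct jmmInterface_1_ jmm_interface = { /* ... */ };
    #endif // INCLUDE_MANAGEMENT
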
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp Fri Sep 26 06:07:48 2014 +0000
@@ -331,7 +331,7 @@
// index, &_elems[index], _elems[index]);
E* t = (E*)&_elems[index]; // cast away volatility
oop* p = (oop*)t;
- assert((*t)->is_oop_or_null(), "Not an oop or null");
+ assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
f->do_oop(p);
}
// tty->print_cr("END OopTaskQueue::oops_do");
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Sep 25 23:37:41 2014 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Fri Sep 26 06:07:48 2014 +0000
@@ -30,6 +30,7 @@
import java.util.function.Function;
import java.util.stream.Stream;
import java.security.BasicPermission;
+
import sun.hotspot.parser.DiagnosticCommand;
public class WhiteBox {
@@ -168,6 +169,12 @@
// CPU features
public native String getCPUFeatures();
+ // Native extensions
+ public native long getHeapUsageForContext(int context);
+ public native long getHeapRegionCountForContext(int context);
+ public native int getContextForObject(Object obj);
+ public native void printRegionInfo(int context);
+
// VM flags
public native void setBooleanVMFlag(String name, boolean value);
public native void setIntxVMFlag(String name, long value);
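
[Editor's note] The four new WhiteBox natives have no open-source implementation; they resolve only if a downstream build registers them through the WhiteBox::register_extended() hook introduced at the top of this patch, and calling them otherwise throws UnsatisfiedLinkError. The native half would presumably follow whitebox.cpp's WB_ENTRY convention; a hypothetical sketch (name, signature, and body illustrative only):

    WB_ENTRY(jlong, WB_GetHeapUsageForContext(JNIEnv* env, jobject o, jint context))
      return 0; // a real implementation would query the context-aware G1 heap
    WB_END
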