--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -3632,23 +3632,11 @@
if (satb_log_enqueue_with_frame == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_with_frame != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated with-frame satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
- satb_log_enqueue_with_frame_end,
- tty);
- }
}
} else {
if (satb_log_enqueue_frameless == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_frameless != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated frameless satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_frameless,
- satb_log_enqueue_frameless_end,
- tty);
- }
}
}
}
@@ -3841,12 +3829,6 @@
if (dirty_card_log_enqueue == 0) {
generate_dirty_card_log_enqueue(byte_map_base);
assert(dirty_card_log_enqueue != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated dirty_card enqueue:");
- Disassembler::decode((u_char*)dirty_card_log_enqueue,
- dirty_card_log_enqueue_end,
- tty);
- }
}
}
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -1181,8 +1181,6 @@
// Accessors
static oop target( oop site);
static void set_target( oop site, oop target);
-
- static volatile oop target_volatile(oop site);
static void set_target_volatile(oop site, oop target);
// Testers
--- a/hotspot/src/share/vm/classfile/javaClasses.inline.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.inline.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -29,10 +29,6 @@
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
-inline volatile oop java_lang_invoke_CallSite::target_volatile(oop site) {
- return oop((oopDesc *)(site->obj_field_volatile(_target_offset)));
-}
-
inline void java_lang_invoke_CallSite::set_target_volatile(oop site, oop target) {
site->obj_field_put_volatile(_target_offset, target);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -943,13 +943,6 @@
_has_aborted = false;
-#ifndef PRODUCT
- if (G1PrintReachableAtInitialMark) {
- print_reachable("at-cycle-start",
- VerifyOption_G1UsePrevMarking, true /* all */);
- }
-#endif
-
// Initialize marking structures. This has to be done in a STW phase.
reset();
@@ -2684,166 +2677,6 @@
print_stats();
}
-#ifndef PRODUCT
-
-class PrintReachableOopClosure: public OopClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
-
-public:
- PrintReachableOopClosure(outputStream* out,
- VerifyOption vo,
- bool all) :
- _g1h(G1CollectedHeap::heap()),
- _out(out), _vo(vo), _all(all) { }
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop( oop* p) { do_oop_work(p); }
-
- template <class T> void do_oop_work(T* p) {
- oop obj = oopDesc::load_decode_heap_oop(p);
- const char* str = NULL;
- const char* str2 = "";
-
- if (obj == NULL) {
- str = "";
- } else if (!_g1h->is_in_g1_reserved(obj)) {
- str = " O";
- } else {
- HeapRegion* hr = _g1h->heap_region_containing(obj);
- bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
- bool marked = _g1h->is_marked(obj, _vo);
-
- if (over_tams) {
- str = " >";
- if (marked) {
- str2 = " AND MARKED";
- }
- } else if (marked) {
- str = " M";
- } else {
- str = " NOT";
- }
- }
-
- _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
- p2i(p), p2i((void*) obj), str, str2);
- }
-};
-
-class PrintReachableObjectClosure : public ObjectClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
- HeapRegion* _hr;
-
-public:
- PrintReachableObjectClosure(outputStream* out,
- VerifyOption vo,
- bool all,
- HeapRegion* hr) :
- _g1h(G1CollectedHeap::heap()),
- _out(out), _vo(vo), _all(all), _hr(hr) { }
-
- void do_object(oop o) {
- bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
- bool marked = _g1h->is_marked(o, _vo);
- bool print_it = _all || over_tams || marked;
-
- if (print_it) {
- _out->print_cr(" "PTR_FORMAT"%s",
- p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
- PrintReachableOopClosure oopCl(_out, _vo, _all);
- o->oop_iterate_no_header(&oopCl);
- }
- }
-};
-
-class PrintReachableRegionClosure : public HeapRegionClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
-
-public:
- bool doHeapRegion(HeapRegion* hr) {
- HeapWord* b = hr->bottom();
- HeapWord* e = hr->end();
- HeapWord* t = hr->top();
- HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
- _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
- "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
- _out->cr();
-
- HeapWord* from = b;
- HeapWord* to = t;
-
- if (to > from) {
- _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
- _out->cr();
- PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
- hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
- _out->cr();
- }
-
- return false;
- }
-
- PrintReachableRegionClosure(outputStream* out,
- VerifyOption vo,
- bool all) :
- _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
-};
-
-void ConcurrentMark::print_reachable(const char* str,
- VerifyOption vo,
- bool all) {
- gclog_or_tty->cr();
- gclog_or_tty->print_cr("== Doing heap dump... ");
-
- if (G1PrintReachableBaseFile == NULL) {
- gclog_or_tty->print_cr(" #### error: no base file defined");
- return;
- }
-
- if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
- (JVM_MAXPATHLEN - 1)) {
- gclog_or_tty->print_cr(" #### error: file name too long");
- return;
- }
-
- char file_name[JVM_MAXPATHLEN];
- sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
- gclog_or_tty->print_cr(" dumping to file %s", file_name);
-
- fileStream fout(file_name);
- if (!fout.is_open()) {
- gclog_or_tty->print_cr(" #### error: could not open file");
- return;
- }
-
- outputStream* out = &fout;
- out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
- out->cr();
-
- out->print_cr("--- ITERATING OVER REGIONS");
- out->cr();
- PrintReachableRegionClosure rcl(out, vo, all);
- _g1h->heap_region_iterate(&rcl);
- out->cr();
-
- gclog_or_tty->print_cr(" done");
- gclog_or_tty->flush();
-}
-
-#endif // PRODUCT
-
void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
@@ -3887,12 +3720,11 @@
CMObjectClosure oc(this);
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
- satb_mq_set.set_closure(_worker_id, &oc);
// This keeps claiming and applying the closure to completed buffers
// until we run out of buffers or we need to abort.
while (!has_aborted() &&
- satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) {
+ satb_mq_set.apply_closure_to_completed_buffer(&oc)) {
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
}
@@ -3906,8 +3738,6 @@
concurrent() ||
satb_mq_set.completed_buffers_num() == 0, "invariant");
- satb_mq_set.set_closure(_worker_id, NULL);
-
// again, this was a potentially expensive operation, decrease the
// limits to get the regular clock call early
decrease_limits();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -110,15 +110,15 @@
_retained_old_gc_alloc_region = NULL;
}
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
- ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+G1PLAB::G1PLAB(size_t gclab_word_size) :
+ PLAB(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
+ G1PLAB* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire();
@@ -151,7 +151,7 @@
void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (uint state = 0; state < InCSetState::Num; state++) {
- G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
+ G1PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
add_to_alloc_buffer_waste(buf->words_remaining());
buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -28,7 +28,7 @@
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
#include "gc_interface/collectedHeap.hpp"
class EvacuationInfo;
@@ -147,18 +147,18 @@
}
};
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+class G1PLAB: public PLAB {
private:
bool _retired;
public:
- G1ParGCAllocBuffer(size_t gclab_word_size);
- virtual ~G1ParGCAllocBuffer() {
+ G1PLAB(size_t gclab_word_size);
+ virtual ~G1PLAB() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
- ParGCAllocBuffer::set_buf(buf);
+ PLAB::set_buf(buf);
_retired = false;
}
@@ -166,7 +166,7 @@
if (_retired) {
return;
}
- ParGCAllocBuffer::retire();
+ PLAB::retire();
_retired = true;
}
};
@@ -190,7 +190,7 @@
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
virtual void retire_alloc_buffers() = 0;
- virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
+ virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
@@ -229,7 +229,7 @@
HeapWord* plab_allocate(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
- G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+ G1PLAB* buffer = alloc_buffer(dest, context);
if (_survivor_alignment_bytes == 0) {
return buffer->allocate(word_sz);
} else {
@@ -259,14 +259,14 @@
};
class G1DefaultParGCAllocator : public G1ParGCAllocator {
- G1ParGCAllocBuffer _surviving_alloc_buffer;
- G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
+ G1PLAB _surviving_alloc_buffer;
+ G1PLAB _tenured_alloc_buffer;
+ G1PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
- virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
assert(_alloc_buffers[dest.value()] != NULL,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -410,10 +410,6 @@
return !hr->is_humongous();
}
-// Private class members.
-
-G1CollectedHeap* G1CollectedHeap::_g1h;
-
// Private methods.
HeapRegion*
@@ -1769,14 +1765,12 @@
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
- _g1h = this;
-
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
- _allocator = G1Allocator::create_allocator(_g1h);
+ _allocator = G1Allocator::create_allocator(this);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1939,8 +1933,6 @@
_bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
- _g1h = this;
-
{
HeapWord* start = _hrm.reserved().start();
HeapWord* end = _hrm.reserved().end();
@@ -3109,12 +3101,6 @@
// print_extended_on() instead of print_on().
print_extended_on(gclog_or_tty);
gclog_or_tty->cr();
-#ifndef PRODUCT
- if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
- concurrent_mark()->print_reachable("at-verification-failure",
- vo, false /* all */);
- }
-#endif
gclog_or_tty->flush();
}
guarantee(!failures, "there should not have been any failures");
@@ -3320,9 +3306,10 @@
#endif // PRODUCT
G1CollectedHeap* G1CollectedHeap::heap() {
- assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()");
- assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap");
- return _g1h;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
+ assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
+ return (G1CollectedHeap*)heap;
}
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
@@ -4877,7 +4864,7 @@
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
- uint n_workers = _g1h->workers()->active_workers();
+ uint n_workers = workers()->active_workers();
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
@@ -4909,7 +4896,7 @@
void G1CollectedHeap::redirty_logged_cards() {
double redirty_logged_cards_start = os::elapsedTime();
- uint n_workers = _g1h->workers()->active_workers();
+ uint n_workers = workers()->active_workers();
G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
dirty_card_queue_set().reset_for_par_iteration();
@@ -5342,7 +5329,7 @@
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
- if (_g1h->g1_policy()->during_initial_mark_pause()) {
+ if (g1_policy()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = ©_mark_non_heap_cl;
}
@@ -6097,12 +6084,12 @@
HeapRegionSetCount empty_set;
remove_from_old_sets(empty_set, cl.humongous_free_count());
- G1HRPrinter* hr_printer = _g1h->hr_printer();
- if (hr_printer->is_active()) {
+ G1HRPrinter* hrp = hr_printer();
+ if (hrp->is_active()) {
FreeRegionListIterator iter(&local_cleanup_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
- hr_printer->cleanup(hr);
+ hrp->cleanup(hr);
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -39,7 +39,6 @@
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
@@ -202,9 +201,6 @@
friend class G1CheckCSetFastTableClosure;
private:
- // The one and only G1CollectedHeap, so static functions can find it.
- static G1CollectedHeap* _g1h;
-
FlexibleWorkGang* _workers;
static size_t _humongous_object_threshold_in_words;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -119,7 +119,6 @@
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace(" 1");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -199,7 +198,6 @@
// tracking expects us to do so. See comment under phase4.
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("2");
prepare_compaction();
}
@@ -233,7 +231,6 @@
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("3");
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@@ -295,7 +292,6 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("4");
G1SpaceCompactClosure blk;
g1h->heap_region_iterate(&blk);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -41,15 +41,6 @@
develop(intx, G1MarkingVerboseLevel, 0, \
"Level (0-4) of verboseness of the marking code") \
\
- develop(bool, G1PrintReachableAtInitialMark, false, \
- "Reachable object dump at the initial mark pause") \
- \
- develop(bool, G1VerifyDuringGCPrintReachable, false, \
- "If conc mark verification fails, dump reachable objects") \
- \
- develop(ccstr, G1PrintReachableBaseFile, NULL, \
- "The base file name for the reachable object dumps") \
- \
develop(bool, G1TraceMarkStackOverflow, false, \
"If true, extra debugging code for CM restart for ovflw.") \
\
@@ -99,9 +90,6 @@
"the buffer will be enqueued for processing. A value of 0 " \
"specifies that mutator threads should not do such filtering.") \
\
- develop(bool, G1SATBPrintStubs, false, \
- "If true, print generated stubs for the SATB barrier") \
- \
experimental(intx, G1ExpandByPercentOfAvailable, 20, \
"When expanding, % of uncommitted space to claim.") \
\
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -44,6 +44,7 @@
// The solution is to remove this method from the definition
// of a Space.
+class G1CollectedHeap;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,30 +33,67 @@
#include "runtime/vmThread.hpp"
void ObjPtrQueue::flush() {
- // The buffer might contain refs into the CSet. We have to filter it
- // first before we flush it, otherwise we might end up with an
- // enqueued buffer with refs into the CSet which breaks our invariants.
+ // Filter now to possibly save work later. If filtering empties the
+ // buffer then flush_impl can deallocate the buffer.
filter();
flush_impl();
}
-// This method removes entries from an SATB buffer that will not be
-// useful to the concurrent marking threads. An entry is removed if it
-// satisfies one of the following conditions:
+// Return true if a SATB buffer entry refers to an object that
+// requires marking.
+//
+// The entry must point into the G1 heap. In particular, it must not
+// be a NULL pointer. NULL pointers are pre-filtered and never
+// inserted into a SATB buffer.
+//
+// An entry that is below the NTAMS pointer for the containing heap
+// region requires marking. Such an entry must point to a valid object.
+//
+// An entry that is at least the NTAMS pointer for the containing heap
+// region might be any of the following, none of which should be marked.
+//
+// * A reference to an object allocated since marking started.
+// According to SATB, such objects are implicitly kept live and do
+// not need to be dealt with via SATB buffer processing.
+//
+// * A reference to a young generation object. Young objects are
+// handled separately and are not marked by concurrent marking.
+//
+// * A stale reference to a young generation object. If a young
+// generation object reference is recorded and not filtered out
+// before being moved by a young collection, the reference becomes
+// stale.
//
-// * it points to an object outside the G1 heap (G1's concurrent
-// marking only visits objects inside the G1 heap),
-// * it points to an object that has been allocated since marking
-// started (according to SATB those objects do not need to be
-// visited during marking), or
-// * it points to an object that has already been marked (no need to
-// process it again).
+// * A stale reference to an eagerly reclaimed humongous object. If a
+// humongous object is recorded and then reclaimed, the reference
+// becomes stale.
//
-// The rest of the entries will be retained and are compacted towards
-// the top of the buffer. Note that, because we do not allow old
-// regions in the CSet during marking, all objects on the CSet regions
-// are young (eden or survivors) and therefore implicitly live. So any
-// references into the CSet will be removed during filtering.
+// The stale reference cases are implicitly handled by the NTAMS
+// comparison. Because of the possibility of stale references, buffer
+// processing must be somewhat circumspect and not assume entries
+// in an unfiltered buffer refer to valid objects.
+
+inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
+ // Includes rejection of NULL pointers.
+ assert(heap->is_in_reserved(entry),
+ err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ HeapRegion* region = heap->heap_region_containing_raw(entry);
+ assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
+ if (entry >= region->next_top_at_mark_start()) {
+ return false;
+ }
+
+ assert(((oop)entry)->is_oop(true /* ignore mark word */),
+ err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ return true;
+}
+
+// This method removes entries from a SATB buffer that will not be
+// useful to the concurrent marking threads. Entries are retained if
+// they require marking and are not already marked. Retained entries
+// are compacted toward the top of the buffer.
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -78,26 +115,25 @@
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
debug_only(entries += 1;)
- oop* p = (oop*) &buf[byte_index_to_index((int) i)];
- oop obj = *p;
+ void** p = &buf[byte_index_to_index((int) i)];
+ void* entry = *p;
// NULL the entry so that unused parts of the buffer contain NULLs
// at the end. If we are going to retain it we will copy it to its
// final place. If we have retained all entries we have visited so
// far, we'll just end up copying it to the same place.
*p = NULL;
- bool retain = g1h->is_obj_ill(obj);
- if (retain) {
+ if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
assert(new_index > 0, "we should not have already filled up the buffer");
new_index -= oopSize;
assert(new_index >= i,
"new_index should never be below i, as we always compact 'up'");
- oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
+ void** new_p = &buf[byte_index_to_index((int) new_index)];
assert(new_p >= p, "the destination location should never be below "
"the source as we always compact 'up'");
assert(*new_p == NULL,
"we should have already cleared the destination location");
- *new_p = obj;
+ *new_p = entry;
debug_only(retained += 1;)
}
}
@@ -184,23 +220,12 @@
}
#endif // PRODUCT
-#ifdef ASSERT
-void ObjPtrQueue::verify_oops_in_buffer() {
- if (_buf == NULL) return;
- for (size_t i = _index; i < _sz; i += oopSize) {
- oop obj = (oop)_buf[byte_index_to_index((int)i)];
- assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
- "Not an oop");
- }
-}
-#endif
-
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
SATBMarkQueueSet::SATBMarkQueueSet() :
- PtrQueueSet(), _closures(NULL),
+ PtrQueueSet(),
_shared_satb_queue(this, true /*perm*/) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -208,11 +233,9 @@
Mutex* lock) {
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
_shared_satb_queue.set_lock(lock);
- _closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
}
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
- DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}
@@ -272,13 +295,7 @@
shared_satb_queue()->filter();
}
-void SATBMarkQueueSet::set_closure(uint worker, ObjectClosure* closure) {
- assert(_closures != NULL, "Precondition");
- assert(worker < ParallelGCThreads, "Worker index must be in range [0...ParallelGCThreads)");
- _closures[worker] = closure;
-}
-
-bool SATBMarkQueueSet::apply_closure_to_completed_buffer(uint worker) {
+bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) {
BufferNode* nd = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
@@ -290,7 +307,6 @@
if (_n_completed_buffers == 0) _process_completed = false;
}
}
- ObjectClosure* cl = _closures[worker];
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
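
The ObjPtrQueue::filter() hunk above only shows part of the compaction loop, so here is a stand-alone sketch of what it does: walk the active entries from the top of the buffer down, NULL every slot, and copy each retained entry back toward the top so that the survivors end up contiguous at the high end, with the returned index playing the role the queue gives to _index. The keep predicate stands in for requires_marking() plus the isMarkedNext() check; this is an illustration in plain C++, not the HotSpot code itself.

  #include <cstddef>

  // Illustrative only: compact the live slots buf[index, size) that satisfy
  // keep() toward the high end of the buffer, clearing everything else.
  // Returns the new start index, i.e. the role filter() assigns to _index.
  template <typename Pred>
  size_t filter_buffer(void** buf, size_t index, size_t size, Pred keep) {
    size_t new_index = size;          // one past the last retained slot so far
    for (size_t i = size; i > index; ) {
      --i;                            // process entries from the top downward
      void* entry = buf[i];
      buf[i] = NULL;                  // unused parts of the buffer stay NULL
      if (entry != NULL && keep(entry)) {
        --new_index;                  // never drops below i, so we compact "up"
        buf[new_index] = entry;
      }
    }
    return new_index;                 // retained entries now occupy [new_index, size)
  }

In the real queue the indices are byte offsets stepped by oopSize, and the keep test is requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry), exactly as in the hunk above.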
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,13 +72,9 @@
void print(const char* name);
static void print(const char* name, void** buf, size_t index, size_t sz);
#endif // PRODUCT
-
- void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
class SATBMarkQueueSet: public PtrQueueSet {
- ObjectClosure** _closures; // One per ParGCThread.
-
ObjPtrQueue _shared_satb_queue;
#ifdef ASSERT
@@ -104,16 +100,10 @@
// Filter all the currently-active SATB buffers.
void filter_thread_buffers();
- // Register closure for the given worker thread. The "apply_closure_to_completed_buffer"
- // method will apply this closure to a completed buffer, and "iterate_closure_all_threads"
- // applies it to partially-filled buffers (the latter should only be done
- // with the world stopped).
- void set_closure(uint worker, ObjectClosure* closure);
-
// If there exists some completed buffer, pop it, then apply the
- // registered closure to all its elements, and return true. If no
+ // closure to all its elements, and return true. If no
// completed buffers exist, return false.
- bool apply_closure_to_completed_buffer(uint worker);
+ bool apply_closure_to_completed_buffer(ObjectClosure* closure);
// Apply the given closure on enqueued and currently-active buffers
// respectively. Both methods are read-only, i.e., they do not
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -34,7 +34,7 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
+#include "gc_implementation/shared/plab.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
@@ -226,7 +226,7 @@
// buffer.
HeapWord* obj = NULL;
if (!_to_space_full) {
- ParGCAllocBuffer* const plab = to_space_alloc_buffer();
+ PLAB* const plab = to_space_alloc_buffer();
Space* const sp = to_space();
if (word_sz * 100 <
ParallelGCBufferWastePct * plab->word_sz()) {
@@ -236,7 +236,7 @@
HeapWord* buf_space = sp->par_allocate(buf_size);
if (buf_space == NULL) {
const size_t min_bytes =
- ParGCAllocBuffer::min_size() << LogHeapWordSize;
+ PLAB::min_size() << LogHeapWordSize;
size_t free_bytes = sp->free();
while(buf_space == NULL && free_bytes >= min_bytes) {
buf_size = free_bytes >> LogHeapWordSize;
@@ -252,7 +252,7 @@
record_survivor_plab(buf_space, buf_size);
obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
// Note that we cannot compare buf_size < word_sz below
- // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
+ // because of AlignmentReserve (see PLAB::allocate()).
assert(obj != NULL || plab->words_remaining() < word_sz,
"Else should have been able to allocate");
// It's conceivable that we may be able to use the
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -27,7 +27,7 @@
#include "gc_implementation/parNew/parOopClosures.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
@@ -65,7 +65,7 @@
ObjToScanQueue *_work_queue;
Stack<oop, mtGC>* const _overflow_stack;
- ParGCAllocBuffer _to_space_alloc_buffer;
+ PLAB _to_space_alloc_buffer;
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
@@ -140,7 +140,7 @@
ObjToScanQueue* work_queue() { return _work_queue; }
- ParGCAllocBuffer* to_space_alloc_buffer() {
+ PLAB* to_space_alloc_buffer() {
return &_to_space_alloc_buffer;
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -49,16 +49,11 @@
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
-ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize();
- // Initialize collector policy
- _collector_policy = new GenerationSizer();
- _collector_policy->initialize_all();
-
const size_t heap_size = _collector_policy->max_heap_byte_size();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
@@ -89,7 +84,6 @@
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
- _psh = this;
_gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
_old_gen = _gens->old_gen();
@@ -634,9 +628,10 @@
}
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
- assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
- assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
- return _psh;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
+ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
+ return (ParallelScavengeHeap*)heap;
}
// Before delegating the resize to the young generation,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -53,8 +53,6 @@
static PSAdaptiveSizePolicy* _size_policy;
static PSGCAdaptivePolicyCounters* _gc_policy_counters;
- static ParallelScavengeHeap* _psh;
-
GenerationSizer* _collector_policy;
// Collection of generations that are adjacent in the
@@ -76,7 +74,8 @@
HeapWord* mem_allocate_old_gen(size_t size);
public:
- ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }
+ ParallelScavengeHeap(GenerationSizer* policy) :
+ CollectedHeap(), _collector_policy(policy), _death_march_count(0) { }
// For use by VM operations
enum CollectionType {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -510,7 +510,6 @@
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace(" 1");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -570,7 +569,6 @@
void PSMarkSweep::mark_sweep_phase2() {
GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("2");
// Now all live objects are marked, compute the new object addresses.
@@ -598,7 +596,6 @@
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("3");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
@@ -639,7 +636,6 @@
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("4");
// All pointers are now adjusted, move objects accordingly
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,11 +60,29 @@
// Used when initializing the _name field.
static inline const char* select_name();
+#ifdef ASSERT
+ void assert_block_in_covered_region(MemRegion new_memregion) {
+ // Explicitly capture current covered_region in a local
+ MemRegion covered_region = this->start_array()->covered_region();
+ assert(covered_region.contains(new_memregion),
+ err_msg("new region is not in covered_region [ "PTR_FORMAT", "PTR_FORMAT" ], "
+ "new region [ "PTR_FORMAT", "PTR_FORMAT" ], "
+ "object space [ "PTR_FORMAT", "PTR_FORMAT" ]",
+ p2i(covered_region.start()),
+ p2i(covered_region.end()),
+ p2i(new_memregion.start()),
+ p2i(new_memregion.end()),
+ p2i(this->object_space()->used_region().start()),
+ p2i(this->object_space()->used_region().end())));
+ }
+#endif
+
HeapWord* allocate_noexpand(size_t word_size) {
// We assume the heap lock is held here.
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = object_space()->allocate(word_size);
if (res != NULL) {
+ DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
_start_array.allocate_block(res);
}
return res;
@@ -77,6 +95,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
HeapWord* res = object_space()->cas_allocate(word_size);
if (res != NULL) {
+ DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
_start_array.allocate_block(res);
}
return res;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@
\
static_field(ParallelScavengeHeap, _young_gen, PSYoungGen*) \
static_field(ParallelScavengeHeap, _old_gen, PSOldGen*) \
- static_field(ParallelScavengeHeap, _psh, ParallelScavengeHeap*) \
\
#define VM_TYPES_PARALLELGC(declare_type, \
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -338,15 +338,6 @@
MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
}
-#ifndef PRODUCT
-
-void MarkSweep::trace(const char* msg) {
- if (TraceMarkSweep)
- gclog_or_tty->print("%s", msg);
-}
-
-#endif
-
int InstanceKlass::oop_ms_adjust_pointers(oop obj) {
int size = size_helper();
oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::adjust_pointer_closure);
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -131,9 +131,6 @@
// Non public closures
static KeepAliveClosure keep_alive;
- // Debugging
- static void trace(const char* msg) PRODUCT_RETURN;
-
public:
// Public closures
static IsAliveClosure is_alive;
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp Thu Apr 16 14:05:48 2015 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
-#include "memory/threadLocalAllocBuffer.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/oop.inline.hpp"
-
-size_t ParGCAllocBuffer::min_size() {
- // Make sure that we return something that is larger than AlignmentReserve
- return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
-}
-
-size_t ParGCAllocBuffer::max_size() {
- return ThreadLocalAllocBuffer::max_size();
-}
-
-ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
- _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
- _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
-{
- // ArrayOopDesc::header_size depends on command line initialization.
- AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
- assert(min_size() > AlignmentReserve,
- err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
- "to be able to contain objects", min_size(), AlignmentReserve));
-}
-
-// If the minimum object size is greater than MinObjAlignment, we can
-// end up with a shard at the end of the buffer that's smaller than
-// the smallest object. We can't allow that because the buffer must
-// look like it's full of objects when we retire it, so we make
-// sure we have enough space for a filler int array object.
-size_t ParGCAllocBuffer::AlignmentReserve;
-
-void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
- // Retire the last allocation buffer.
- size_t unused = retire_internal();
-
- // Now flush the statistics.
- stats->add_allocated(_allocated);
- stats->add_wasted(_wasted);
- stats->add_unused(unused);
-
- // Since we have flushed the stats we need to clear the _allocated and _wasted
- // fields in case somebody retains an instance of this over GCs. Not doing so
- // will artifically inflate the values in the statistics.
- _allocated = 0;
- _wasted = 0;
-}
-
-void ParGCAllocBuffer::retire() {
- _wasted += retire_internal();
-}
-
-size_t ParGCAllocBuffer::retire_internal() {
- size_t result = 0;
- if (_top < _hard_end) {
- CollectedHeap::fill_with_object(_top, _hard_end);
- result += invalidate();
- }
- return result;
-}
-
-// Compute desired plab size and latch result for later
-// use. This should be called once at the end of parallel
-// scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
- assert(ResizePLAB, "Not set");
-
- assert(is_object_aligned(max_size()) && min_size() <= max_size(),
- "PLAB clipping computation may be incorrect");
-
- if (_allocated == 0) {
- assert(_unused == 0,
- err_msg("Inconsistency in PLAB stats: "
- "_allocated: "SIZE_FORMAT", "
- "_wasted: "SIZE_FORMAT", "
- "_unused: "SIZE_FORMAT,
- _allocated, _wasted, _unused));
-
- _allocated = 1;
- }
- double wasted_frac = (double)_unused / (double)_allocated;
- size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
- if (target_refills == 0) {
- target_refills = 1;
- }
- size_t used = _allocated - _wasted - _unused;
- size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
- // Take historical weighted average
- _filter.sample(recent_plab_sz);
- // Clip from above and below, and align to object boundary
- size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
- new_plab_sz = MIN2(max_size(), new_plab_sz);
- new_plab_sz = align_object_size(new_plab_sz);
- // Latch the result
- if (PrintPLAB) {
- gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
- }
- _desired_plab_sz = new_plab_sz;
-
- reset();
-}
-
-#ifndef PRODUCT
-void ParGCAllocBuffer::print() {
- gclog_or_tty->print_cr("parGCAllocBuffer: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
- " _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT ")",
- p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
-}
-#endif // !PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Thu Apr 16 14:05:48 2015 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-
-#include "gc_implementation/shared/gcUtil.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// Forward declarations.
-class PLABStats;
-
-// A per-thread allocation buffer used during GC.
-class ParGCAllocBuffer: public CHeapObj<mtGC> {
-protected:
- char head[32];
- size_t _word_sz; // In HeapWord units
- HeapWord* _bottom;
- HeapWord* _top;
- HeapWord* _end; // Last allocatable address + 1
- HeapWord* _hard_end; // _end + AlignmentReserve
- // In support of ergonomic sizing of PLAB's
- size_t _allocated; // in HeapWord units
- size_t _wasted; // in HeapWord units
- char tail[32];
- static size_t AlignmentReserve;
-
- // Force future allocations to fail and queries for contains()
- // to return false. Returns the amount of unused space in this PLAB.
- size_t invalidate() {
- _end = _hard_end;
- size_t remaining = pointer_delta(_end, _top); // Calculate remaining space.
- _top = _end; // Force future allocations to fail.
- _bottom = _end; // Force future contains() queries to return false.
- return remaining;
- }
-
- // Fill in remaining space with a dummy object and invalidate the PLAB. Returns
- // the amount of remaining space.
- size_t retire_internal();
-
-public:
- // Initializes the buffer to be empty, but with the given "word_sz".
- // Must get initialized with "set_buf" for an allocation to succeed.
- ParGCAllocBuffer(size_t word_sz);
- virtual ~ParGCAllocBuffer() {}
-
- // Minimum PLAB size.
- static size_t min_size();
- // Maximum PLAB size.
- static size_t max_size();
-
- // If an allocation of the given "word_sz" can be satisfied within the
- // buffer, do the allocation, returning a pointer to the start of the
- // allocated block. If the allocation request cannot be satisfied,
- // return NULL.
- HeapWord* allocate(size_t word_sz) {
- HeapWord* res = _top;
- if (pointer_delta(_end, _top) >= word_sz) {
- _top = _top + word_sz;
- return res;
- } else {
- return NULL;
- }
- }
-
- // Allocate the object aligned to "alignment_in_bytes".
- HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes);
-
- // Undo the last allocation in the buffer, which is required to be of the
- // "obj" of the given "word_sz".
- void undo_allocation(HeapWord* obj, size_t word_sz) {
- assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
- assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
- _top = obj;
- }
-
- // The total (word) size of the buffer, including both allocated and
- // unallocated space.
- size_t word_sz() { return _word_sz; }
-
- // Should only be done if we are about to reset with a new buffer of the
- // given size.
- void set_word_size(size_t new_word_sz) {
- assert(new_word_sz > AlignmentReserve, "Too small");
- _word_sz = new_word_sz;
- }
-
- // The number of words of unallocated space remaining in the buffer.
- size_t words_remaining() {
- assert(_end >= _top, "Negative buffer");
- return pointer_delta(_end, _top, HeapWordSize);
- }
-
- bool contains(void* addr) {
- return (void*)_bottom <= addr && addr < (void*)_hard_end;
- }
-
- // Sets the space of the buffer to be [buf, space+word_sz()).
- virtual void set_buf(HeapWord* buf) {
- _bottom = buf;
- _top = _bottom;
- _hard_end = _bottom + word_sz();
- _end = _hard_end - AlignmentReserve;
- assert(_end >= _top, "Negative buffer");
- // In support of ergonomic sizing
- _allocated += word_sz();
- }
-
- // Flush allocation statistics into the given PLABStats supporting ergonomic
- // sizing of PLAB's and retire the current buffer. To be called at the end of
- // GC.
- void flush_and_retire_stats(PLABStats* stats);
-
- // Fills in the unallocated portion of the buffer with a garbage object and updates
- // statistics. To be called during GC.
- virtual void retire();
-
- void print() PRODUCT_RETURN;
-};
-
-// PLAB book-keeping.
-class PLABStats VALUE_OBJ_CLASS_SPEC {
- size_t _allocated; // Total allocated
- size_t _wasted; // of which wasted (internal fragmentation)
- size_t _unused; // Unused in last buffer
- size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
- AdaptiveWeightedAverage
- _filter; // Integrator with decay
-
- void reset() {
- _allocated = 0;
- _wasted = 0;
- _unused = 0;
- }
- public:
- PLABStats(size_t desired_plab_sz_, unsigned wt) :
- _allocated(0),
- _wasted(0),
- _unused(0),
- _desired_plab_sz(desired_plab_sz_),
- _filter(wt)
- { }
-
- static const size_t min_size() {
- return ParGCAllocBuffer::min_size();
- }
-
- static const size_t max_size() {
- return ParGCAllocBuffer::max_size();
- }
-
- size_t desired_plab_sz() {
- return _desired_plab_sz;
- }
-
- // Updates the current desired PLAB size. Computes the new desired PLAB size,
- // updates _desired_plab_sz and clears sensor accumulators.
- void adjust_desired_plab_sz(uint no_of_gc_workers);
-
- void add_allocated(size_t v) {
- Atomic::add_ptr(v, &_allocated);
- }
-
- void add_unused(size_t v) {
- Atomic::add_ptr(v, &_unused);
- }
-
- void add_wasted(size_t v) {
- Atomic::add_ptr(v, &_wasted);
- }
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp Thu Apr 16 14:05:48 2015 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
-
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
-
-HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
-
- HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
- if (res == NULL) {
- return NULL;
- }
-
- // Set _top so that allocate(), which expects _top to be correctly set,
- // can be used below.
- _top = res;
- return allocate(word_sz);
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/plab.hpp"
+#include "memory/threadLocalAllocBuffer.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/oop.inline.hpp"
+
+size_t PLAB::min_size() {
+ // Make sure that we return something that is larger than AlignmentReserve
+ return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
+}
+
+size_t PLAB::max_size() {
+ return ThreadLocalAllocBuffer::max_size();
+}
+
+PLAB::PLAB(size_t desired_plab_sz_) :
+ _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
+ _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
+{
+ // ArrayOopDesc::header_size depends on command line initialization.
+ AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
+ assert(min_size() > AlignmentReserve,
+ err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
+ "to be able to contain objects", min_size(), AlignmentReserve));
+}
+
+// If the minimum object size is greater than MinObjAlignment, we can
+// end up with a shard at the end of the buffer that's smaller than
+// the smallest object. We can't allow that because the buffer must
+// look like it's full of objects when we retire it, so we make
+// sure we have enough space for a filler int array object.
+size_t PLAB::AlignmentReserve;
+
+void PLAB::flush_and_retire_stats(PLABStats* stats) {
+ // Retire the last allocation buffer.
+ size_t unused = retire_internal();
+
+ // Now flush the statistics.
+ stats->add_allocated(_allocated);
+ stats->add_wasted(_wasted);
+ stats->add_unused(unused);
+
+ // Since we have flushed the stats we need to clear the _allocated and _wasted
+ // fields in case somebody retains an instance of this over GCs. Not doing so
+ // will artificially inflate the values in the statistics.
+ _allocated = 0;
+ _wasted = 0;
+}
+
+void PLAB::retire() {
+ _wasted += retire_internal();
+}
+
+size_t PLAB::retire_internal() {
+ size_t result = 0;
+ if (_top < _hard_end) {
+ CollectedHeap::fill_with_object(_top, _hard_end);
+ result += invalidate();
+ }
+ return result;
+}
+
+// Compute desired plab size and latch result for later
+// use. This should be called once at the end of parallel
+// scavenge; it clears the sensor accumulators.
+void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
+ assert(ResizePLAB, "Not set");
+
+ assert(is_object_aligned(max_size()) && min_size() <= max_size(),
+ "PLAB clipping computation may be incorrect");
+
+ if (_allocated == 0) {
+ assert(_unused == 0,
+ err_msg("Inconsistency in PLAB stats: "
+ "_allocated: "SIZE_FORMAT", "
+ "_wasted: "SIZE_FORMAT", "
+ "_unused: "SIZE_FORMAT,
+ _allocated, _wasted, _unused));
+
+ _allocated = 1;
+ }
+ double wasted_frac = (double)_unused / (double)_allocated;
+ size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
+ if (target_refills == 0) {
+ target_refills = 1;
+ }
+ size_t used = _allocated - _wasted - _unused;
+ size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
+ // Take historical weighted average
+ _filter.sample(recent_plab_sz);
+ // Clip from above and below, and align to object boundary
+ size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
+ new_plab_sz = MIN2(max_size(), new_plab_sz);
+ new_plab_sz = align_object_size(new_plab_sz);
+ // Latch the result
+ if (PrintPLAB) {
+ gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
+ }
+ _desired_plab_sz = new_plab_sz;
+
+ reset();
+}
+
+#ifndef PRODUCT
+void PLAB::print() {
+ gclog_or_tty->print_cr("PLAB: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
+ " _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT ")",
+ p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
+}
+#endif // !PRODUCT
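
plab.cpp and plab.hpp keep the ParGCAllocBuffer behaviour under the new PLAB name, so the lifecycle is unchanged: a GC worker bump-allocates from the current buffer, retires it when a request no longer fits, installs fresh space with set_buf(), and at the end of the collection folds its numbers into PLABStats via flush_and_retire_stats(), after which adjust_desired_plab_sz() picks the next desired size. A minimal sketch of that refill cycle follows; acquire is a hypothetical callback standing in for whatever par_allocate call actually hands out the raw space, not a HotSpot API.

  // Illustrative refill cycle for the renamed PLAB class. 'acquire' is a
  // placeholder that returns 'words' HeapWords of raw to-space/old-gen memory,
  // or NULL when that space is full.
  HeapWord* plab_allocate_or_refill(PLAB* plab,
                                    HeapWord* (*acquire)(size_t words),
                                    size_t word_sz) {
    HeapWord* obj = plab->allocate(word_sz);   // fast path: bump-pointer allocation
    if (obj != NULL) {
      return obj;
    }
    // Buffer exhausted: fill its tail with a dummy object and count the
    // leftover words as waste.
    plab->retire();
    HeapWord* buf = acquire(plab->word_sz());  // ask for a fresh buffer
    if (buf == NULL) {
      return NULL;                             // caller falls back to direct allocation
    }
    plab->set_buf(buf);                        // [buf, buf + word_sz()) becomes the PLAB
    return plab->allocate(word_sz);
  }

At the end of the pause each buffer goes through flush_and_retire_stats(), so the _allocated, _wasted and _unused counters feed adjust_desired_plab_sz() for the next cycle.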
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
+
+#include "gc_implementation/shared/gcUtil.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Forward declarations.
+class PLABStats;
+
+// A per-thread allocation buffer used during GC.
+class PLAB: public CHeapObj<mtGC> {
+protected:
+ char head[32]; // Padding; presumably intended to keep the hot fields below from sharing cache lines with neighboring data.
+ size_t _word_sz; // In HeapWord units
+ HeapWord* _bottom;
+ HeapWord* _top;
+ HeapWord* _end; // Last allocatable address + 1
+ HeapWord* _hard_end; // _end + AlignmentReserve
+ // In support of ergonomic sizing of PLABs
+ size_t _allocated; // in HeapWord units
+ size_t _wasted; // in HeapWord units
+ char tail[32]; // Padding; see the note on 'head' above.
+ static size_t AlignmentReserve;
+
+ // Force future allocations to fail and queries for contains()
+ // to return false. Returns the amount of unused space in this PLAB.
+ size_t invalidate() {
+ _end = _hard_end;
+ size_t remaining = pointer_delta(_end, _top); // Calculate remaining space.
+ _top = _end; // Force future allocations to fail.
+ _bottom = _end; // Force future contains() queries to return false.
+ return remaining;
+ }
+
+ // Fill in remaining space with a dummy object and invalidate the PLAB. Returns
+ // the amount of remaining space.
+ size_t retire_internal();
+
+public:
+ // Initializes the buffer to be empty, but with the given "word_sz".
+ // Must be initialized with "set_buf" before an allocation can succeed.
+ PLAB(size_t word_sz);
+ virtual ~PLAB() {}
+
+ // Minimum PLAB size.
+ static size_t min_size();
+ // Maximum PLAB size.
+ static size_t max_size();
+
+ // If an allocation of the given "word_sz" can be satisfied within the
+ // buffer, do the allocation, returning a pointer to the start of the
+ // allocated block. If the allocation request cannot be satisfied,
+ // return NULL.
+ HeapWord* allocate(size_t word_sz) {
+ HeapWord* res = _top;
+ if (pointer_delta(_end, _top) >= word_sz) {
+ _top = _top + word_sz;
+ return res;
+ } else {
+ return NULL;
+ }
+ }
+
+ // Allocate the object aligned to "alignment_in_bytes".
+ HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes);
+
+ // Undo the last allocation in the buffer, which is required to be the
+ // block "obj" of the given "word_sz".
+ void undo_allocation(HeapWord* obj, size_t word_sz) {
+ assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
+ assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
+ _top = obj;
+ }
+
+ // The total (word) size of the buffer, including both allocated and
+ // unallocated space.
+ size_t word_sz() { return _word_sz; }
+
+ // Should only be done if we are about to reset with a new buffer of the
+ // given size.
+ void set_word_size(size_t new_word_sz) {
+ assert(new_word_sz > AlignmentReserve, "Too small");
+ _word_sz = new_word_sz;
+ }
+
+ // The number of words of unallocated space remaining in the buffer.
+ size_t words_remaining() {
+ assert(_end >= _top, "Negative buffer");
+ return pointer_delta(_end, _top, HeapWordSize);
+ }
+
+ bool contains(void* addr) {
+ return (void*)_bottom <= addr && addr < (void*)_hard_end;
+ }
+
+ // Sets the space of the buffer to be [buf, buf + word_sz()).
+ virtual void set_buf(HeapWord* buf) {
+ _bottom = buf;
+ _top = _bottom;
+ _hard_end = _bottom + word_sz();
+ _end = _hard_end - AlignmentReserve;
+ assert(_end >= _top, "Negative buffer");
+ // In support of ergonomic sizing
+ _allocated += word_sz();
+ }
+
+ // Flush allocation statistics into the given PLABStats (which supports
+ // ergonomic sizing of PLABs) and retire the current buffer. To be called
+ // at the end of GC.
+ void flush_and_retire_stats(PLABStats* stats);
+
+ // Fills in the unallocated portion of the buffer with a garbage object and updates
+ // statistics. To be called during GC.
+ virtual void retire();
+
+ void print() PRODUCT_RETURN;
+};
+
+// PLAB book-keeping.
+class PLABStats VALUE_OBJ_CLASS_SPEC {
+ size_t _allocated; // Total allocated
+ size_t _wasted; // of which wasted (internal fragmentation)
+ size_t _unused; // Unused in last buffer
+ size_t _desired_plab_sz; // Output of filter (below), suitably trimmed and quantized
+ AdaptiveWeightedAverage _filter; // Integrator with decay
+
+ void reset() {
+ _allocated = 0;
+ _wasted = 0;
+ _unused = 0;
+ }
+ public:
+ PLABStats(size_t desired_plab_sz_, unsigned wt) :
+ _allocated(0),
+ _wasted(0),
+ _unused(0),
+ _desired_plab_sz(desired_plab_sz_),
+ _filter(wt)
+ { }
+
+ static size_t min_size() {
+ return PLAB::min_size();
+ }
+
+ static size_t max_size() {
+ return PLAB::max_size();
+ }
+
+ size_t desired_plab_sz() {
+ return _desired_plab_sz;
+ }
+
+ // Computes the new desired PLAB size, updates _desired_plab_sz and clears
+ // the sensor accumulators.
+ void adjust_desired_plab_sz(uint no_of_gc_workers);
+
+ void add_allocated(size_t v) {
+ Atomic::add_ptr(v, &_allocated);
+ }
+
+ void add_unused(size_t v) {
+ Atomic::add_ptr(v, &_unused);
+ }
+
+ void add_wasted(size_t v) {
+ Atomic::add_ptr(v, &_wasted);
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
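
To make the allocation protocol declared above concrete, here is a minimal self-contained model of a bump-pointer buffer in plain C++, using word indices instead of HeapWord* pointers; the class and member names are invented for illustration and deliberately ignore the real filler objects and statistics.

#include <cassert>
#include <cstddef>
#include <vector>

// Toy bump-pointer buffer modeling PLAB-style allocation.
class ToyPlab {
  std::vector<int> _buf;   // backing storage, one slot per "word"
  size_t _top;             // next free word
  size_t _end;             // last allocatable word + 1 (excludes the reserve)
  size_t _wasted;          // words given up when retiring
public:
  ToyPlab(size_t word_sz, size_t alignment_reserve)
    : _buf(word_sz), _top(0), _end(word_sz - alignment_reserve), _wasted(0) {}

  // Bump allocation: succeeds only if the request fits below _end.
  bool allocate(size_t word_sz, size_t* out_offset) {
    if (_end - _top >= word_sz) {
      *out_offset = _top;
      _top += word_sz;
      return true;
    }
    return false;
  }

  // Undo the most recent allocation of exactly word_sz words.
  void undo_allocation(size_t offset, size_t word_sz) {
    assert(_top - offset == word_sz && "can only undo the last allocation");
    _top = offset;
  }

  // Retire: the real PLAB fills [_top, _hard_end) with a dummy object so the
  // buffer parses as a sequence of objects; here it is simply counted as waste.
  void retire() {
    _wasted += _buf.size() - _top;
    _top = _buf.size();
  }

  size_t wasted() const { return _wasted; }
};

The alignment_reserve parameter plays the role of PLAB::AlignmentReserve above: by keeping _end short of the true end of the buffer, retire() is always left with enough room for the filler array object.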
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.inline.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
+
+#include "gc_implementation/shared/plab.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+
+HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
+
+ HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
+ if (res == NULL) {
+ return NULL;
+ }
+
+ // Set _top so that allocate(), which expects _top to be correctly set,
+ // can be used below.
+ _top = res;
+ return allocate(word_sz);
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
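
Conceptually, align_allocation_or_fail rounds _top up to the requested byte alignment and gives up if the padded address would run past _end (the real helper also plugs the skipped gap with a filler object). A standalone sketch of that arithmetic, with invented names:

#include <cstddef>
#include <cstdint>

// Round 'top' up to 'alignment_in_bytes' (assumed to be a power of two);
// return NULL if the aligned address would exceed 'end'.
static char* align_or_fail(char* top, char* end, unsigned alignment_in_bytes) {
  uintptr_t mask    = (uintptr_t)alignment_in_bytes - 1;
  uintptr_t aligned = ((uintptr_t)top + mask) & ~mask;
  if (aligned > (uintptr_t)end) {
    return NULL;    // not enough room left to pad up to the requested alignment
  }
  return (char*)aligned;
}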
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -59,7 +59,6 @@
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS
-GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
// The set of potentially parallel tasks in root scanning.
@@ -127,8 +126,6 @@
_rem_set = collector_policy()->create_rem_set(reserved_region());
set_barrier_set(rem_set()->bs());
- _gch = this;
-
ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
_young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
@@ -1103,9 +1100,10 @@
}
GenCollectedHeap* GenCollectedHeap::heap() {
- assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
- assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
- return _gch;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
+ assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
+ return (GenCollectedHeap*)heap;
}
void GenCollectedHeap::prepare_for_compaction() {
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -54,11 +54,7 @@
public:
friend class VM_PopulateDumpSharedSpace;
- protected:
- // Fields:
- static GenCollectedHeap* _gch;
-
- private:
+private:
Generation* _young_gen;
Generation* _old_gen;
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -187,7 +187,6 @@
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace(" 1");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -258,7 +257,6 @@
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("2");
gch->prepare_for_compaction();
}
@@ -275,7 +273,6 @@
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("3");
// Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -325,7 +322,6 @@
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("4");
GenCompactClosure blk;
gch->generation_iterate(&blk, true);
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -687,6 +687,15 @@
return JNI_OK;
}
+template <class Heap, class Policy>
+jint Universe::create_heap() {
+ assert(_collectedHeap == NULL, "Heap already created");
+ Policy* policy = new Policy();
+ policy->initialize_all();
+ _collectedHeap = new Heap(policy);
+ return _collectedHeap->initialize();
+}
+
// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled - Use 32-bits oops without encoding when
@@ -696,50 +705,37 @@
// HeapBased - Use compressed oops with heap base + encoding.
jint Universe::initialize_heap() {
-
- if (UseParallelGC) {
-#if INCLUDE_ALL_GCS
- Universe::_collectedHeap = new ParallelScavengeHeap();
-#else // INCLUDE_ALL_GCS
- fatal("UseParallelGC not supported in this VM.");
-#endif // INCLUDE_ALL_GCS
-
- } else if (UseG1GC) {
-#if INCLUDE_ALL_GCS
- G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
- g1p->initialize_all();
- G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
- Universe::_collectedHeap = g1h;
-#else // INCLUDE_ALL_GCS
- fatal("UseG1GC not supported in java kernel vm.");
-#endif // INCLUDE_ALL_GCS
+ jint status = JNI_ERR;
- } else {
- GenCollectorPolicy *gc_policy;
+#if !INCLUDE_ALL_GCS
+ if (UseParallelGC) {
+ fatal("UseParallelGC not supported in this VM.");
+ } else if (UseG1GC) {
+ fatal("UseG1GC not supported in this VM.");
+ } else if (UseConcMarkSweepGC) {
+ fatal("UseConcMarkSweepGC not supported in this VM.");
+ }
+#else
+ if (UseParallelGC) {
+ status = Universe::create_heap<ParallelScavengeHeap, GenerationSizer>();
+ } else if (UseG1GC) {
+ status = Universe::create_heap<G1CollectedHeap, G1CollectorPolicyExt>();
+ } else if (UseConcMarkSweepGC) {
+ status = Universe::create_heap<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
+ }
+#endif
+ else { // UseSerialGC
+ // Don't assert that UseSerialGC is set here because there are cases
+ // where no GC is set and we then fall back to using SerialGC.
+ status = Universe::create_heap<GenCollectedHeap, MarkSweepPolicy>();
+ }
- if (UseSerialGC) {
- gc_policy = new MarkSweepPolicy();
- } else if (UseConcMarkSweepGC) {
-#if INCLUDE_ALL_GCS
- gc_policy = new ConcurrentMarkSweepPolicy();
-#else // INCLUDE_ALL_GCS
- fatal("UseConcMarkSweepGC not supported in this VM.");
-#endif // INCLUDE_ALL_GCS
- } else { // default old generation
- gc_policy = new MarkSweepPolicy();
- }
- gc_policy->initialize_all();
-
- Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
+ if (status != JNI_OK) {
+ return status;
}
ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
- jint status = Universe::heap()->initialize();
- if (status != JNI_OK) {
- return status;
- }
-
#ifdef _LP64
if (UseCompressedOops) {
// Subtract a page because something can get allocated at heap base.
@@ -1063,7 +1059,7 @@
MemoryService::add_metaspace_memory_pools();
- MemoryService::set_universe_heap(Universe::_collectedHeap);
+ MemoryService::set_universe_heap(Universe::heap());
#if INCLUDE_CDS
SharedClassUtil::initialize(CHECK_false);
#endif
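
The create_heap<Heap, Policy>() template introduced above simply factors out the policy-construction, heap-construction and initialization sequence that the old if/else chain spelled out once per collector. The pattern in isolation, with invented demo types (none of these names exist in HotSpot):

#include <cstddef>
#include <cstdio>

// Invented stand-ins for a collector policy and a heap.
struct DemoPolicy {
  void initialize_all() { /* read flags, size generations, etc. */ }
};
struct DemoHeap {
  explicit DemoHeap(DemoPolicy* p) : _policy(p) {}
  int initialize() { return 0; }   // 0 plays the role of JNI_OK
  DemoPolicy* _policy;
};

// Same shape as Universe::create_heap<Heap, Policy>(): build and initialize
// the policy, build the heap around it, then initialize the heap and return
// its status so the caller can bail out on failure.
template <class Heap, class Policy>
static int create_heap(Heap** out) {
  Policy* policy = new Policy();
  policy->initialize_all();
  *out = new Heap(policy);
  return (*out)->initialize();
}

int main() {
  DemoHeap* heap = NULL;
  int status = create_heap<DemoHeap, DemoPolicy>(&heap);
  printf("heap init status: %d\n", status);
  delete heap->_policy;
  delete heap;
  return status;
}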
--- a/hotspot/src/share/vm/memory/universe.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -214,6 +214,7 @@
static size_t _heap_capacity_at_last_gc;
static size_t _heap_used_at_last_gc;
+ template <class Heap, class Policy> static jint create_heap();
static jint initialize_heap();
static void initialize_basic_type_mirrors(TRAPS);
static void fixup_mirrors(TRAPS);
--- a/hotspot/src/share/vm/oops/oop.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -201,7 +201,6 @@
// Access to fields in an instanceOop through these methods.
oop obj_field(int offset) const;
- volatile oop obj_field_volatile(int offset) const;
void obj_field_put(int offset, oop value);
void obj_field_put_raw(int offset, oop value);
void obj_field_put_volatile(int offset, oop value);
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -284,11 +284,6 @@
load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
load_decode_heap_oop(obj_field_addr<oop>(offset));
}
-inline volatile oop oopDesc::obj_field_volatile(int offset) const {
- volatile oop value = obj_field(offset);
- OrderAccess::acquire();
- return value;
-}
inline void oopDesc::obj_field_put(int offset, oop value) {
UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
oop_store(obj_field_addr<oop>(offset), value);
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -315,7 +315,7 @@
# include "gc_implementation/parallelScavenge/psYoungGen.hpp"
# include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
# include "gc_implementation/shared/gcPolicyCounters.hpp"
-# include "gc_implementation/shared/parGCAllocBuffer.hpp"
+# include "gc_implementation/shared/plab.hpp"
#endif // INCLUDE_ALL_GCS
#endif // !DONT_USE_PRECOMPILED_HEADER
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Apr 20 14:26:54 2015 +0200
@@ -1960,7 +1960,7 @@
"collection") \
\
develop(uintx, PromotionFailureALotCount, 1000, \
- "Number of promotion failures occurring at ParGCAllocBuffer " \
+ "Number of promotion failures occurring at PLAB " \
"refill attempts (ParNew) or promotion attempts " \
"(other young collectors)") \
\
@@ -2290,9 +2290,6 @@
"If non-zero, assert that GC threads yield within this " \
"number of milliseconds") \
\
- notproduct(bool, TraceMarkSweep, false, \
- "Trace mark sweep") \
- \
product(bool, PrintReferenceGC, false, \
"Print times spent handling reference objects during GC " \
"(enabled only when PrintGCDetails)") \
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Apr 20 14:26:54 2015 +0200
@@ -555,7 +555,6 @@
nonstatic_field(GenerationSpec, _init_size, size_t) \
nonstatic_field(GenerationSpec, _max_size, size_t) \
\
- static_field(GenCollectedHeap, _gch, GenCollectedHeap*) \
nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \
nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \
\
--- a/hotspot/test/Makefile Thu Apr 16 14:05:48 2015 -0700
+++ b/hotspot/test/Makefile Mon Apr 20 14:26:54 2015 +0200
@@ -344,6 +344,34 @@
################################################################
+# basicvmtest (make sure various basic java options work)
+
+# Set up the directory in which the jvm directories live (client/, server/, etc.)
+ifeq ($(PLATFORM),windows)
+JVMS_DIR := $(PRODUCT_HOME)/bin
+else ifeq ($(PLATFORM),bsd)
+JVMS_DIR := $(PRODUCT_HOME)/lib
+else
+# The jvms live in the architecture directory (amd64, sparcv9,
+# etc.). By using a wildcard there's no need to figure out the exact
+# name of that directory.
+JVMS_DIR := $(PRODUCT_HOME)/lib/*
+endif
+
+# Use the existence of a directory as a sign that the JVM variant is available
+CANDIDATE_JVM_VARIANTS := client minimal server
+JVM_VARIANTS := $(strip $(foreach x,$(CANDIDATE_JVM_VARIANTS),$(if $(wildcard $(JVMS_DIR)/$(x)),$(x))))
+
+hotspot_basicvmtest:
+ for variant in $(JVM_VARIANTS); \
+ do \
+ $(MAKE) JAVA_ARGS="$(JAVA_ARGS) -$$variant" hotspot_$${variant}test; \
+ done
+
+PHONY_LIST += hotspot_basicvmtest
+
+################################################################
+
# clienttest (make sure various basic java client options work)
hotspot_clienttest clienttest: sanitytest