# HG changeset patch # User tschatzl # Date 1440426461 -7200 # Node ID 626f27450e121e80533250ce06fa430b456b2a9d # Parent 81663b0d3631dd4fba6cac0baa8156df485adcb1 8067336: Allow that PLAB allocations at the end of regions are flexible Summary: PLAB allocations may return a buffer that is between minimum size (current allocation) and the desired size. This removes a large amount of fragmentation at the end of regions. Reviewed-by: tbenson, mgerdin diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp --- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Mon Aug 24 16:27:41 2015 +0200 @@ -205,11 +205,11 @@ } #if G1_ALLOC_REGION_TRACING -void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) { +void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) { // All the calls to trace that set either just the size or the size // and the result are considered part of level 2 tracing and are // skipped during level 1 tracing. 
- if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) { + if ((actual_word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) { const size_t buffer_length = 128; char hr_buffer[buffer_length]; char rest_buffer[buffer_length]; @@ -226,10 +226,10 @@ if (G1_ALLOC_REGION_TRACING > 1) { if (result != NULL) { - jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT " " PTR_FORMAT, - word_size, result); - } else if (word_size != 0) { - jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size); + jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT, + min_word_size, desired_word_size, actual_word_size, result); + } else if (min_word_size != 0) { + jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size); } else { jio_snprintf(rest_buffer, buffer_length, ""); } diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp --- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -104,6 +104,15 @@ static inline HeapWord* par_allocate(HeapRegion* alloc_region, size_t word_size, bool bot_updates); + // Perform a MT-safe allocation out of the given region, with the given + // minimum and desired size. Returns the actual size allocated (between + // minimum and desired size) in actual_word_size if the allocation has been + // successful. + static inline HeapWord* par_allocate(HeapRegion* alloc_region, + size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, + bool bot_updates); // Ensure that the region passed as a parameter has been filled up // so that noone else can allocate out of it any more. @@ -159,7 +168,18 @@ // First-level allocation: Should be called without holding a // lock. 
It will try to allocate lock-free out of the active region, // or return NULL if it was unable to. - inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates); + inline HeapWord* attempt_allocation(size_t word_size, + bool bot_updates); + // Perform an allocation out of the current allocation region, with the given + // minimum and desired size. Returns the actual size allocated (between + // minimum and desired size) in actual_word_size if the allocation has been + // successful. + // Should be called without holding a lock. It will try to allocate lock-free + // out of the active region, or return NULL if it was unable to. + inline HeapWord* attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, + bool bot_updates); // Second-level allocation: Should be called while holding a // lock. It will try to first allocate lock-free out of the active @@ -169,6 +189,14 @@ // it conform to its locking protocol. inline HeapWord* attempt_allocation_locked(size_t word_size, bool bot_updates); + // Same as attempt_allocation_locked(size_t, bool), but allowing specification + // of minimum word size of the block in min_word_size, and the maximum word + // size of the allocation in desired_word_size. The actual size of the block is + // returned in actual_word_size. + inline HeapWord* attempt_allocation_locked(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, + bool bot_updates); // Should be called to allocate a new region even if the max of this // type of regions has been reached. 
Should only be called if other @@ -191,9 +219,17 @@ virtual HeapRegion* release(); #if G1_ALLOC_REGION_TRACING - void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL); + void trace(const char* str, + size_t min_word_size = 0, + size_t desired_word_size = 0, + size_t actual_word_size = 0, + HeapWord* result = NULL); #else // G1_ALLOC_REGION_TRACING - void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { } + void trace(const char* str, + size_t min_word_size = 0, + size_t desired_word_size = 0, + size_t actual_word_size = 0, + HeapWord* result = NULL) { } #endif // G1_ALLOC_REGION_TRACING }; diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1AllocRegion.inline.hpp --- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.inline.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.inline.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -40,52 +40,74 @@ } } +inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, size_t word_size, bool bot_updates) { + size_t temp; + return par_allocate(alloc_region, word_size, word_size, &temp, bot_updates); +} + inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, - size_t word_size, + size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, bool bot_updates) { assert(alloc_region != NULL, err_msg("pre-condition")); assert(!alloc_region->is_empty(), err_msg("pre-condition")); if (!bot_updates) { - return alloc_region->par_allocate_no_bot_updates(word_size); + return alloc_region->par_allocate_no_bot_updates(min_word_size, desired_word_size, actual_word_size); } else { - return alloc_region->par_allocate(word_size); + return alloc_region->par_allocate(min_word_size, desired_word_size, actual_word_size); } } -inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size, +inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size, bool bot_updates) { + size_t temp; + return attempt_allocation(word_size, 
word_size, &temp, bot_updates); +} + +inline HeapWord* G1AllocRegion::attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, bool bot_updates) { assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); HeapRegion* alloc_region = _alloc_region; assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); - HeapWord* result = par_allocate(alloc_region, word_size, bot_updates); + HeapWord* result = par_allocate(alloc_region, min_word_size, desired_word_size, actual_word_size, bot_updates); if (result != NULL) { - trace("alloc", word_size, result); + trace("alloc", min_word_size, desired_word_size, *actual_word_size, result); return result; } - trace("alloc failed", word_size); + trace("alloc failed", min_word_size, desired_word_size); return NULL; } -inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size, +inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size, bool bot_updates) { + size_t temp; + return attempt_allocation_locked(word_size, word_size, &temp, bot_updates); +} + +inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, bool bot_updates) { // First we have to redo the allocation, assuming we're holding the // appropriate lock, in case another thread changed the region while // we were waiting to get the lock. 
- HeapWord* result = attempt_allocation(word_size, bot_updates); + HeapWord* result = attempt_allocation(min_word_size, desired_word_size, actual_word_size, bot_updates); if (result != NULL) { return result; } retire(true /* fill_up */); - result = new_alloc_region_and_allocate(word_size, false /* force */); + result = new_alloc_region_and_allocate(desired_word_size, false /* force */); if (result != NULL) { - trace("alloc locked (second attempt)", word_size, result); + *actual_word_size = desired_word_size; + trace("alloc locked (second attempt)", min_word_size, desired_word_size, *actual_word_size, result); return result; } - trace("alloc locked failed", word_size); + trace("alloc locked failed", min_word_size, desired_word_size); return NULL; } @@ -94,13 +116,13 @@ assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); - trace("forcing alloc"); + trace("forcing alloc", word_size, word_size); HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */); if (result != NULL) { - trace("alloc forced", word_size, result); + trace("alloc forced", word_size, word_size, word_size, result); return result; } - trace("alloc forced failed", word_size); + trace("alloc forced failed", word_size, word_size); return NULL; } diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1Allocator.cpp --- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp Mon Aug 24 16:27:41 2015 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/g1/g1Allocator.inline.hpp" +#include "gc/g1/g1AllocRegion.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1MarkSweep.hpp" @@ -143,11 +144,24 @@ HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest, size_t word_size, AllocationContext_t context) { + size_t temp = 0; + HeapWord* result 
= par_allocate_during_gc(dest, word_size, word_size, &temp, context); + assert(result == NULL || temp == word_size, + err_msg("Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT, + word_size, temp, p2i(result))); + return result; +} + +HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest, + size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, + AllocationContext_t context) { switch (dest.value()) { case InCSetState::Young: - return survivor_attempt_allocation(word_size, context); + return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context); case InCSetState::Old: - return old_attempt_allocation(word_size, context); + return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context); default: ShouldNotReachHere(); return NULL; // Keep some compilers happy @@ -170,37 +184,49 @@ _old_is_full = true; } -HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size, +HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, AllocationContext_t context) { - assert(!_g1h->is_humongous(word_size), + assert(!_g1h->is_humongous(desired_word_size), "we should not be seeing humongous-size allocations in this path"); - HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size, + HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size, + desired_word_size, + actual_word_size, false /* bot_updates */); if (result == NULL && !survivor_is_full(context)) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size, + result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size, + desired_word_size, + actual_word_size, false /* bot_updates */); if (result == NULL) { set_survivor_full(context); } } if (result != NULL) { - 
_g1h->dirty_young_block(result, word_size); + _g1h->dirty_young_block(result, *actual_word_size); } return result; } -HeapWord* G1Allocator::old_attempt_allocation(size_t word_size, +HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, AllocationContext_t context) { - assert(!_g1h->is_humongous(word_size), + assert(!_g1h->is_humongous(desired_word_size), "we should not be seeing humongous-size allocations in this path"); - HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size, + HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size, + desired_word_size, + actual_word_size, true /* bot_updates */); if (result == NULL && !old_is_full(context)) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size, + result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size, + desired_word_size, + actual_word_size, true /* bot_updates */); if (result == NULL) { set_old_full(context); @@ -242,10 +268,19 @@ G1PLAB* alloc_buf = alloc_buffer(dest, context); alloc_buf->retire(); - HeapWord* buf = _allocator->par_allocate_during_gc(dest, plab_word_size, context); + size_t actual_plab_size = 0; + HeapWord* buf = _allocator->par_allocate_during_gc(dest, + required_in_plab, + plab_word_size, + &actual_plab_size, + context); + + assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)), + err_msg("Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT, + required_in_plab, plab_word_size, actual_plab_size, p2i(buf))); + if (buf != NULL) { - // Otherwise. 
- alloc_buf->set_buf(buf, plab_word_size); + alloc_buf->set_buf(buf, actual_plab_size); HeapWord* const obj = alloc_buf->allocate(word_sz); assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate " diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1Allocator.hpp --- a/hotspot/src/share/vm/gc/g1/g1Allocator.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1Allocator.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -57,10 +57,14 @@ virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0; // Allocation attempt during GC for a survivor object / PLAB. - inline HeapWord* survivor_attempt_allocation(size_t word_size, + inline HeapWord* survivor_attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, AllocationContext_t context); // Allocation attempt during GC for an old object / PLAB. - inline HeapWord* old_attempt_allocation(size_t word_size, + inline HeapWord* old_attempt_allocation(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, AllocationContext_t context); public: G1Allocator(G1CollectedHeap* heap) : _g1h(heap), _survivor_is_full(false), _old_is_full(false) { } @@ -102,6 +106,12 @@ size_t word_size, AllocationContext_t context); + HeapWord* par_allocate_during_gc(InCSetState dest, + size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size, + AllocationContext_t context); + virtual size_t used_in_alloc_regions() = 0; }; diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp --- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -26,7 +26,7 @@ #define SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_INLINE_HPP #include "gc/g1/g1BlockOffsetTable.hpp" -#include "gc/g1/heapRegion.inline.hpp" +#include "gc/g1/heapRegion.hpp" #include "gc/shared/space.hpp" 
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/heapRegion.hpp --- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -109,7 +109,7 @@ // evacuation pauses between two cleanups, which is _highly_ unlikely. class G1OffsetTableContigSpace: public CompactibleSpace { friend class VMStructs; - HeapWord* _top; + HeapWord* volatile _top; HeapWord* volatile _scan_top; protected: G1BlockOffsetArrayContigSpace _offsets; @@ -134,10 +134,18 @@ // Reset the G1OffsetTableContigSpace. virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); - HeapWord** top_addr() { return &_top; } - // Allocation helpers (return NULL if full). - inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); - inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); + HeapWord* volatile* top_addr() { return &_top; } + // Try to allocate at least min_word_size and up to desired_size from this Space. + // Returns NULL if not possible, otherwise sets actual_word_size to the amount of + // space allocated. + // This version assumes that all allocation requests to this Space are properly + // synchronized. + inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); + // Try to allocate at least min_word_size and up to desired_size from this Space. + // Returns NULL if not possible, otherwise sets actual_word_size to the amount of + // space allocated. + // This version synchronizes with other calls to par_allocate_impl(). 
+ inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); public: void reset_after_compaction() { set_top(compaction_top()); } @@ -179,9 +187,14 @@ HeapWord* block_start(const void* p); HeapWord* block_start_const(const void* p) const; - // Add offset table update. + // Allocation (return NULL if full). Assumes the caller has established + // mutually exclusive access to the space. + HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); + // Allocation (return NULL if full). Enforces mutual exclusion internally. + HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); + virtual HeapWord* allocate(size_t word_size); - HeapWord* par_allocate(size_t word_size); + virtual HeapWord* par_allocate(size_t word_size); HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; } @@ -351,8 +364,9 @@ // Override for scan_and_forward support. void prepare_for_compaction(CompactPoint* cp); - inline HeapWord* par_allocate_no_bot_updates(size_t word_size); + inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size); inline HeapWord* allocate_no_bot_updates(size_t word_size); + inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size); // If this region is a member of a HeapRegionManager, the index in that // sequence, otherwise -1. diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp --- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -32,33 +32,39 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" -// This version requires locking. 
-inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size, - HeapWord* const end_value) { +inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { HeapWord* obj = top(); - if (pointer_delta(end_value, obj) >= size) { - HeapWord* new_top = obj + size; + size_t available = pointer_delta(end(), obj); + size_t want_to_allocate = MIN2(available, desired_word_size); + if (want_to_allocate >= min_word_size) { + HeapWord* new_top = obj + want_to_allocate; set_top(new_top); assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + *actual_size = want_to_allocate; return obj; } else { return NULL; } } -// This version is lock-free. -inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size, - HeapWord* const end_value) { +inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { do { HeapWord* obj = top(); - if (pointer_delta(end_value, obj) >= size) { - HeapWord* new_top = obj + size; + size_t available = pointer_delta(end(), obj); + size_t want_to_allocate = MIN2(available, desired_word_size); + if (want_to_allocate >= min_word_size) { + HeapWord* new_top = obj + want_to_allocate; HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. 
if (result == obj) { assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + *actual_size = want_to_allocate; return obj; } } else { @@ -67,20 +73,34 @@ } while (true); } -inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) { - HeapWord* res = allocate_impl(size, end()); +inline HeapWord* G1OffsetTableContigSpace::allocate(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { + HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size); if (res != NULL) { - _offsets.alloc_block(res, size); + _offsets.alloc_block(res, *actual_size); } return res; } +inline HeapWord* G1OffsetTableContigSpace::allocate(size_t word_size) { + size_t temp; + return allocate(word_size, word_size, &temp); +} + +inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) { + size_t temp; + return par_allocate(word_size, word_size, &temp); +} + // Because of the requirement of keeping "_offsets" up to date with the // allocations, we sequentialize these with a lock. Therefore, best if // this is used for larger LAB allocations only. 
-inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) { +inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { MutexLocker x(&_par_alloc_lock); - return allocate(size); + return allocate(min_word_size, desired_word_size, actual_size); } inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) { @@ -128,14 +148,23 @@ return pointer_delta(next, addr); } -inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) { +inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size) { assert(is_young(), "we can only skip BOT updates on young regions"); - return par_allocate_impl(word_size, end()); + return par_allocate_impl(min_word_size, desired_word_size, actual_word_size); } inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) { + size_t temp; + return allocate_no_bot_updates(word_size, word_size, &temp); +} + +inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size, + size_t desired_word_size, + size_t* actual_word_size) { assert(is_young(), "we can only skip BOT updates on young regions"); - return allocate_impl(word_size, end()); + return allocate_impl(min_word_size, desired_word_size, actual_word_size); } inline void HeapRegion::note_start_of_marking() { diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/gc/g1/vmStructs_g1.hpp --- a/hotspot/src/share/vm/gc/g1/vmStructs_g1.hpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/gc/g1/vmStructs_g1.hpp Mon Aug 24 16:27:41 2015 +0200 @@ -34,7 +34,7 @@ static_field(HeapRegion, GrainBytes, size_t) \ static_field(HeapRegion, LogOfHRGrainBytes, int) \ \ - nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord*) \ + nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord* volatile) \ \ nonstatic_field(G1HeapRegionTable, _base, address) \ nonstatic_field(G1HeapRegionTable, _length, 
size_t) \ diff -r 81663b0d3631 -r 626f27450e12 hotspot/src/share/vm/runtime/vmStructs.cpp --- a/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Aug 25 00:26:10 2015 -0400 +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Aug 24 16:27:41 2015 +0200 @@ -1565,6 +1565,7 @@ declare_toplevel_type(Generation*) \ declare_toplevel_type(GenerationSpec**) \ declare_toplevel_type(HeapWord*) \ + declare_toplevel_type(HeapWord* volatile) \ declare_toplevel_type(MemRegion*) \ declare_toplevel_type(OffsetTableContigSpace*) \ declare_toplevel_type(Space*) \