src/hotspot/share/gc/g1/g1Allocator.cpp
changeset 47216 71c04702a3d5
parent 46810 7dad333205cd
child 48402 945332d45710
/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "utilities/align.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

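// If an old GC alloc region was retained from the previous collection and is
// still usable (see the conditions below), take it out of the old region set
// and install it as the current old GC alloc region; *retained_old is cleared
// either way.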
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way, so there is no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less than
  // the minimum TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

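// Convenience variant that allocates exactly word_size words by calling the
// min/desired-size overload with both sizes set to word_size.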
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

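// Allocate between min_word_size and desired_word_size words from the GC alloc
// region matching the destination state (survivor for InCSetState::Young, old
// for InCSetState::Old), reporting the size actually allocated through
// actual_word_size.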
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

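// Allocate out of the survivor GC alloc region: first try a lock-free
// allocation in the current region; if that fails and survivor space has not
// yet been marked full, retry under the FreeList_lock, which may retire the
// region and acquire a new one. A failure under the lock marks survivor space
// as full so other workers skip the locked retry. Successful allocations are
// passed to dirty_young_block().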
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

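// Old-generation allocation follows the same lock-free-then-locked protocol as
// survivor_attempt_allocation(), but with BOT (block offset table) updates
// enabled and without dirtying the result as a young block.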
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

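// Returns true if an allocation of allocation_word_sz words amounts to less
// than ParallelGCBufferWastePct percent of a buffer of buffer_size words.
// Used to decide whether the current PLAB may be retired for a new one: a
// request this small implies that the unused space thrown away with the old
// buffer is within the allowed waste.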
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

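// Called when the current PLAB for 'dest' could not satisfy an allocation of
// word_sz words. Either retires the PLAB and refills it with a new buffer,
// sized between the minimum needed for word_sz and the desired PLAB size, and
// allocates the object from the fresh PLAB, or, if a refill is not worthwhile
// or fails (recorded in *plab_refill_failed), falls back to allocating the
// object directly in a GC alloc region.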
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
    may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

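// Set up one PLAB per destination: _surviving_alloc_buffer for
// InCSetState::Young and _tenured_alloc_buffer for InCSetState::Old; the
// remaining in-collection-set states have no associated buffer.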
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
}

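// Retire all per-destination PLABs, folding their statistics plus the words
// allocated directly outside PLABs into the heap's per-destination
// G1EvacStats, and reset the direct-allocation counters.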
void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

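// Accumulate the wasted and undo-wasted word counts of all per-destination
// PLABs into the output parameters.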
void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

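// Bump-pointer allocation inside the current archive region. Allocation is
// done in min_region_size'd chunks ([_bottom, _max)); when a request does not
// fit in the current chunk, the rest of the chunk is filled with a dummy
// object and allocation continues in the next chunk, taking a fresh archive
// region once the current one is used up. Returns NULL if no new region can
// be obtained.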
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

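// Finish archive allocation: pad the current region's top with filler objects
// up to the requested end alignment (if any), then walk the allocated regions
// and append MemRegions describing the allocated ranges to the caller's array,
// coalescing regions that are contiguous in the address space.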
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}