hotspot/src/share/vm/gc/g1/g1Allocator.cpp
changeset 31346 a70d45c06136
parent 31331 a7c714b6cfb3
child 31632 d041b34dd3e7
@@ -24,10 +24,11 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 
 void G1DefaultAllocator::init_mutator_alloc_region() {
   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
@@ -42,10 +43,12 @@
 void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                             OldGCAllocRegion* old,
                                             HeapRegion** retained_old) {
   HeapRegion* retained_region = *retained_old;
   *retained_old = NULL;
+  assert(retained_region == NULL || !retained_region->is_archive(),
+         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
 
   // We will discard the current GC alloc region if:
   // a) it's in the collection set (it can happen!),
   // b) it's already full (no point in using it),
   // c) it's empty (this means that it was emptied during
@@ -166,5 +169,14 @@
       wasted += buf->waste();
       undo_wasted += buf->undo_waste();
     }
   }
 }
+
+G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
+  // Create the archive allocator, and also enable archive object checking
+  // in mark-sweep, since we will be creating archive regions.
+  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
+  G1MarkSweep::enable_archive_object_check();
+  return result;
+}
+
@@ -170,0 +183,28 @@
+bool G1ArchiveAllocator::alloc_new_region() {
+  // Allocate the highest free region in the reserved heap,
+  // and add it to our list of allocated regions. It is marked
+  // archive and added to the old set.
+  HeapRegion* hr = _g1h->alloc_highest_free_region();
+  if (hr == NULL) {
+    return false;
+  }
+  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
+  hr->set_archive();
+  _g1h->_old_set.add(hr);
+  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
+  _allocated_regions.append(hr);
+  _allocation_region = hr;
+
+  // Set up _bottom and _max to begin allocating in the lowest
+  // min_region_size'd chunk of the allocated G1 region.
+  _bottom = hr->bottom();
+  _max = _bottom + HeapRegion::min_region_size_in_words();
+
+  // Tell mark-sweep that objects in this region are not to be marked.
+  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+
+  // Since we've modified the old set, call update_sizes.
+  _g1h->g1mm()->update_sizes();
+  return true;
+}
+
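alloc_new_region() frames allocation with _bottom and _max so that nothing is ever placed across a min_region_size boundary; each region is consumed one min_region_size'd chunk at a time. A minimal standalone sketch of that carving arithmetic (not HotSpot code), assuming 8-byte HeapWords, a hypothetical 1 MB minimum region size, and a 4 MB region:

#include <cstdio>
#include <cstddef>

typedef unsigned long long HeapWordAddr;  // stand-in for HeapWord*, one unit = one word

static const size_t kHeapWordSize   = 8;        // assumption: 64-bit HeapWords
static const size_t kMinRegionBytes = 1 << 20;  // assumption: 1 MB minimum region size
static const size_t kMinRegionWords = kMinRegionBytes / kHeapWordSize;
static const size_t kRegionWords    = 4 * kMinRegionWords;  // a hypothetical 4 MB region

int main() {
  HeapWordAddr bottom = 0;              // hr->bottom() of a fresh region
  HeapWordAddr end    = bottom + kRegionWords;
  // Walk the region one min_region_size'd chunk at a time, the same way
  // alloc_new_region() and archive_mem_allocate() advance _bottom and _max.
  for (HeapWordAddr b = bottom; b != end; b += kMinRegionWords) {
    HeapWordAddr max = b + kMinRegionWords;
    printf("chunk [%llu, %llu) words\n", b, max);
  }
  return 0;
}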
       
@@ -170,0 +211,53 @@
+HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
+  assert(word_size != 0, "size must not be zero");
+  if (_allocation_region == NULL) {
+    if (!alloc_new_region()) {
+      return NULL;
+    }
+  }
+  HeapWord* old_top = _allocation_region->top();
+  assert(_bottom >= _allocation_region->bottom(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
+                 p2i(_bottom), p2i(_allocation_region->bottom())));
+  assert(_max <= _allocation_region->end(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
+                 p2i(_max), p2i(_allocation_region->end())));
+  assert(_bottom <= old_top && old_top <= _max,
+         err_msg("inconsistent allocation state: expected "
+                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
+                 p2i(_bottom), p2i(old_top), p2i(_max)));
+
+  // Allocate the next word_size words in the current allocation chunk.
+  // If allocation would cross the _max boundary, insert a filler and begin
+  // at the base of the next min_region_size'd chunk. Also advance to the next
+  // chunk if we don't yet cross the boundary, but the remainder would be too
+  // small to fill.
+  HeapWord* new_top = old_top + word_size;
+  size_t remainder = pointer_delta(_max, new_top);
+  if ((new_top > _max) ||
+      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
+    if (old_top != _max) {
+      size_t fill_size = pointer_delta(_max, old_top);
+      CollectedHeap::fill_with_object(old_top, fill_size);
+      _summary_bytes_used += fill_size * HeapWordSize;
+    }
+    _allocation_region->set_top(_max);
+    old_top = _bottom = _max;
+
+    // Check if we've just used up the last min_region_size'd chunk
+    // in the current region, and if so, allocate a new one.
+    if (_bottom != _allocation_region->end()) {
+      _max = _bottom + HeapRegion::min_region_size_in_words();
+    } else {
+      if (!alloc_new_region()) {
+        return NULL;
+      }
+      old_top = _allocation_region->bottom();
+    }
+  }
+  _allocation_region->set_top(old_top + word_size);
+  _summary_bytes_used += word_size * HeapWordSize;
+
+  return old_top;
+}
+
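The boundary test in archive_mem_allocate() is the subtle step: a request moves to the next chunk not only when it would cross _max, but also when it would leave a tail smaller than the minimum filler object. A standalone sketch of just that predicate (not HotSpot code), assuming a hypothetical 2-word minimum filler in place of CollectedHeap::min_fill_size():

#include <cstdio>
#include <cstddef>

static const size_t kChunkWords   = 128;  // stand-in for _max - _bottom
static const size_t kMinFillWords = 2;    // assumption: typical 64-bit minimum filler

// True if an allocation of word_size at offset top within the current chunk
// must first pad to the chunk boundary and advance, per the test above.
static bool must_advance_chunk(size_t top, size_t word_size) {
  size_t new_top = top + word_size;
  if (new_top > kChunkWords) {
    return true;                          // would cross _max
  }
  size_t remainder = kChunkWords - new_top;
  return new_top < kChunkWords && remainder < kMinFillWords;  // unfillable tail
}

int main() {
  printf("%d\n", must_advance_chunk(120, 16)); // 1: crosses the boundary
  printf("%d\n", must_advance_chunk(120, 7));  // 1: would leave a 1-word tail
  printf("%d\n", must_advance_chunk(120, 8));  // 0: lands exactly on _max
  return 0;
}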
       
@@ -170,0 +264,60 @@
+void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
+                                          size_t end_alignment_in_bytes) {
+  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
+         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
+  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
+
+  // If we've allocated nothing, simply return.
+  if (_allocation_region == NULL) {
+    return;
+  }
+
+  // If an end alignment was requested, insert filler objects.
+  if (end_alignment_in_bytes != 0) {
+    HeapWord* currtop = _allocation_region->top();
+    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
+    size_t fill_size = pointer_delta(newtop, currtop);
+    if (fill_size != 0) {
+      if (fill_size < CollectedHeap::min_fill_size()) {
+        // If the required fill is smaller than we can represent,
+        // bump up to the next aligned address. We know we won't exceed the current
+        // region boundary because the max supported alignment is smaller than the min
+        // region size, and because the allocation code never leaves space smaller than
+        // the min_fill_size at the top of the current allocation region.
+        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
+                                             end_alignment_in_bytes);
+        fill_size = pointer_delta(newtop, currtop);
+      }
+      HeapWord* fill = archive_mem_allocate(fill_size);
+      CollectedHeap::fill_with_objects(fill, fill_size);
+    }
+  }
+
+  // Loop through the allocated regions, and create MemRegions summarizing
+  // the allocated address range, combining contiguous ranges. Add the
+  // MemRegions to the GrowableArray provided by the caller.
+  int index = _allocated_regions.length() - 1;
+  assert(_allocated_regions.at(index) == _allocation_region,
+         err_msg("expected region %u at end of array, found %u",
+                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
+  HeapWord* base_address = _allocation_region->bottom();
+  HeapWord* top = base_address;
+
+  while (index >= 0) {
+    HeapRegion* next = _allocated_regions.at(index);
+    HeapWord* new_base = next->bottom();
+    HeapWord* new_top = next->top();
+    if (new_base != top) {
+      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+      base_address = new_base;
+    }
+    top = new_top;
+    index = index - 1;
+  }
+
+  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
+  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+  _allocated_regions.clear();
+  _allocation_region = NULL;
+};
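complete_archive() finishes in two steps: optional padding so the final top lands on the requested alignment, then one pass that merges the allocated regions into as few MemRegions as possible. Since every alloc_new_region() call took the highest remaining free region, walking _allocated_regions from its last element to its first visits regions in ascending address order, and touching neighbors coalesce. A standalone sketch of that coalescing pass (not HotSpot code), using word offsets in place of HeapWord* values:

#include <cstdio>
#include <cstddef>
#include <vector>
#include <utility>

typedef std::pair<unsigned, unsigned> Range;  // [bottom, top) in word offsets

int main() {
  // Regions in the order complete_archive() visits them: ascending addresses,
  // because _allocated_regions is walked from its last element (the lowest,
  // most recently taken region) back to its first (the highest).
  std::vector<Range> regions = { Range(0, 128), Range(128, 200), Range(512, 600) };
  std::vector<Range> ranges;
  unsigned base = regions[0].first;
  unsigned top  = regions[0].first;
  for (size_t i = 0; i < regions.size(); i++) {
    if (regions[i].first != top) {            // gap: flush the current range
      ranges.push_back(Range(base, top));
      base = regions[i].first;
    }
    top = regions[i].second;
  }
  ranges.push_back(Range(base, top));         // final range is never empty
  for (size_t i = 0; i < ranges.size(); i++) {
    printf("[%u, %u)\n", ranges[i].first, ranges[i].second);  // [0, 200) and [512, 600)
  }
  return 0;
}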