hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert (min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object.  We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;
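
// Illustrative note, assuming typical 64-bit defaults rather than any
// particular configuration: with 8-byte HeapWords, oopDesc::header_size()
// is 2 words while MinObjAlignment is 1 word, so retiring a buffer could
// otherwise leave a 1-word shard too small for even the smallest object.
// Reserving FillerHeaderSize words (the aligned int-array header size)
// means a filler arrayOop can always be written over the unused tail.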
       
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
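
// Net effect when retain is true and space remains: the first
// FillerHeaderSize words past the old top are recorded in _retained_filler,
// _top is advanced just past them, and everything up to _hard_end has been
// covered by the filler object written above.  The next retire() (or
// allocate_slow() in the BOT subclass) re-fills only that small region,
// shortening the large filler so allocation can resume immediately after it.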
       
void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
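
// Illustrative arithmetic for the sizing above (hypothetical inputs, with the
// typical default flag values TargetSurvivorRatio = 50 and
// TargetPLABWastePct = 10): if 5% of the space allocated to PLABs went
// unused, wasted_frac = 0.05 and target_refills = (size_t)((0.05 * 50) / 10)
// = 0, which is clamped to 1; the raw plab_sz is then
// _used / (1 * ParallelGCThreads), and that value is smoothed by _filter,
// clipped to [min_size(), max_size()], object-aligned, and latched into
// _desired_plab_sz.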
       
#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
             "  _retained: %c _retained_filler: [%p,%p)\n",
             _bottom, _top, _end, _hard_end,
             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
     ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
     (size_t)Generation::GenGrain);
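
// These chunk sizes bound how far allocate_slow() below extends _hard_end in
// a single step; they are capped at both the parallel card-chunk alignment
// and the generation grain so that buffer extensions stay aligned with the
// chunking used for parallel card scanning (see the chunk-boundary handling
// in retire() below).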
       
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
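
// Concretely, the unqualified call above goes through the contiguous-space
// BOT (_bt is declared as a BlockOffsetArrayContigSpace) and also advances
// its internal allocation threshold, whereas the explicitly qualified
// _bt.BlockOffsetArray::alloc_block() only records the block in the shared
// offset array and leaves the threshold state alone, as the comment above
// describes for the non-contiguous case.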
       
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res       = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
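
// Sketch of the slow path above: the retained filler is cut back to its
// header, the words between _top and _hard_end are turned into a block, and
// the buffer then grows by one chunk: a new FillerHeaderSize retained filler
// is planted at the old _hard_end, _hard_end advances by ChunkSizeInWords
// (capped at _true_end), _end is pulled back by AlignmentReserve, and the
// fast-path ParGCAllocBuffer::allocate() is retried, with any resulting
// block recorded in the per-buffer BOT.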
       
void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    //   1) we had a filler object from the original top to hard_end.
    //   2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen if the query involves object
      // iteration.  So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}