src/hotspot/share/gc/serial/defNewGeneration.cpp
changeset 47216 71c04702a3d5
parent 46795 623a5e42deb6
child 47580 96392e113a0a
       
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

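// Evacuation is transitive: copying an object may expose more young-gen
// references in its fields, so each iteration below can allocate further
// objects. Loop until a pass over the "since save marks" region copies
// nothing new.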
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(klass),
                                  klass->external_name(),
                                  klass->has_modified_oops() ? "true" : "false");

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

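  // Carve the committed region into [eden | from | to]: eden sits at the
  // bottom of the region, with the two equally sized survivor spaces
  // stacked directly above it.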
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

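// After a successful scavenge all survivors sit in to-space; swapping the
// survivor spaces turns it into the new from-space and leaves an empty
// to-space for the next scavenge.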
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // can potentially cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

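// Grow the desired young gen size by NewSizeThreadIncrease bytes for each
// non-daemon thread, guarding every intermediate step against uintx
// overflow; if any step would overflow, the unadjusted new_size_before is
// returned instead.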
size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease; revert to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Only shrink when eden is empty; bail out of shrinking otherwise.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->barrier_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "  Heap_lock is not owned by self" : "",
                        result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should).
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the
  // real survivor space (half of it by default).
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

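  // compute_tenuring_threshold() returns the lowest age at which the
  // accumulated size of younger survivors would exceed the desired
  // survivor size, so the survivor space is not overcommitted.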
  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->young_process_roots(&srs,
                             &fsc_with_no_gc_barrier,
                             &fsc_with_gc_barrier,
                             &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

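  // Process java.lang.ref references discovered during the scavenge:
  // referents that are still reachable are copied via keep_alive (and
  // their followers evacuated); the rest are cleared, subject to the
  // soft-reference policy installed below.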
  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, &pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
  _preserved_marks_set.restore(&task_executor);
}

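// A failed promotion leaves the object where it is, forwarded to itself.
// Self-forwarding marks it as live-but-not-copied for the remainder of
// this scavenge, while its original mark word is saved in the
// preserved-marks set (when needed) for restoration afterwards.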
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

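// Copy policy: an object below the tenuring threshold is copied into
// to-space; if it is too old, or to-space allocation fails, it is
// promoted into the old generation. If that also fails, the
// promotion-failure path above keeps it in place, self-forwarded.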
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

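// "Saved marks" record each space's top at a scanning checkpoint; objects
// allocated (copied) above a saved mark still need to be scanned, which
// is what no_allocs_since_save_marks() below checks for.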
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

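// Instantiate the iterator above once per (closure type, nv_suffix) pair
// declared by ALL_SINCE_SAVE_MARKS_CLOSURES.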
ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

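// During a full collection the old generation may borrow to-space as
// scratch memory; hand out the unused tail of to-space if it is at least
// MinFreeScratchWords long.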
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->gen_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}