hotspot/src/share/vm/memory/genCollectedHeap.cpp
changeset 27249 698a2dc8e83a
parent 26829 26315213bab8
child 27252 9703e3f1f92a
comparing 27248:5862834b1594 with 27249:698a2dc8e83a
[...]
   _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
 
-  for (i = 0; i < _n_gens; i++) {
-    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
-    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
-    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
-  }
+  ReservedSpace young_rs = heap_rs.first_part(_gen_specs[0]->max_size(), false, false);
+  _young_gen = _gen_specs[0]->init(young_rs, 0, rem_set());
+  heap_rs = heap_rs.last_part(_gen_specs[0]->max_size());
+
+  ReservedSpace old_rs = heap_rs.first_part(_gen_specs[1]->max_size(), false, false);
+  _old_gen = _gen_specs[1]->init(old_rs, 1, rem_set());
+  heap_rs = heap_rs.last_part(_gen_specs[1]->max_size());
   clear_incremental_collection_failed();
 
 #if INCLUDE_ALL_GCS
   // If we are running CMS, create the collector responsible
   // for collecting the CMS generations.
[...]
 #endif // INCLUDE_ALL_GCS
 
   return JNI_OK;
 }
 
-
 char* GenCollectedHeap::allocate(size_t alignment,
                                  size_t* _total_reserved,
                                  int* _n_covered_regions,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
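
The rewritten initialization carves one contiguous reservation into the two generations by splitting it twice. Below is a minimal self-contained sketch of that splitting pattern; ReservedSpace here is a simplified stand-in (the real class takes extra alignment/large-page arguments, and Generation::init() is omitted):

    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for HotSpot's ReservedSpace: a base address plus a size.
    struct ReservedSpace {
      char*  base;
      size_t size;
      ReservedSpace first_part(size_t bytes) const {   // leading carve-out
        assert(bytes <= size);
        return ReservedSpace{base, bytes};
      }
      ReservedSpace last_part(size_t bytes) const {    // remainder after 'bytes'
        assert(bytes <= size);
        return ReservedSpace{base + bytes, size - bytes};
      }
    };

    // Mirrors the patched flow: young gets the first young_max bytes,
    // old gets the first part of what remains.
    void split_heap(ReservedSpace heap_rs, size_t young_max, size_t old_max) {
      ReservedSpace young_rs = heap_rs.first_part(young_max);
      heap_rs = heap_rs.last_part(young_max);
      ReservedSpace old_rs = heap_rs.first_part(old_max);
      (void)young_rs; (void)old_rs;  // would be handed to Generation::init()
    }
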
[...]
   *_n_covered_regions = n_covered_regions;
 
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
 
-
 
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
   guarantee(policy->is_generation_policy(), "Illegal policy type");
[...]
   policy->initialize_gc_policy_counters();
 }
 
 void GenCollectedHeap::ref_processing_init() {
   SharedHeap::ref_processing_init();
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor_init();
-  }
+  _young_gen->ref_processor_init();
+  _old_gen->ref_processor_init();
 }
 
 size_t GenCollectedHeap::capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->capacity();
-  }
-  return res;
+  return _young_gen->capacity() + _old_gen->capacity();
 }
 
 size_t GenCollectedHeap::used() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->used();
-  }
-  return res;
+  return _young_gen->used() + _old_gen->used();
 }
 
 // Save the "used_region" for generations level and lower.
 void GenCollectedHeap::save_used_regions(int level) {
   assert(level < _n_gens, "Illegal level parameter");
-  for (int i = level; i >= 0; i--) {
-    _gens[i]->save_used_region();
+  if (level == 1) {
+    _old_gen->save_used_region();
   }
+  _young_gen->save_used_region();
 }
 
 size_t GenCollectedHeap::max_capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->max_capacity();
-  }
-  return res;
+  return _young_gen->max_capacity() + _old_gen->max_capacity();
 }
 
 // Update the _full_collections_completed counter
 // at the end of a stop-world full GC.
 unsigned int GenCollectedHeap::update_full_collections_completed() {
[...]
 #endif
 
 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                bool is_tlab,
                                                bool first_only) {
-  HeapWord* res;
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->should_allocate(size, is_tlab)) {
-      res = _gens[i]->allocate(size, is_tlab);
-      if (res != NULL) return res;
-      else if (first_only) break;
+  HeapWord* res = NULL;
+
+  if (_young_gen->should_allocate(size, is_tlab)) {
+    res = _young_gen->allocate(size, is_tlab);
+    if (res != NULL || first_only) {
+      return res;
     }
   }
-  // Otherwise...
-  return NULL;
+
+  if (_old_gen->should_allocate(size, is_tlab)) {
+    res = _old_gen->allocate(size, is_tlab);
+  }
+
+  return res;
 }
 
 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                          bool* gc_overhead_limit_was_exceeded) {
   return collector_policy()->mem_allocate_work(size,
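
attempt_allocation now tries the young generation first and only falls back to the old generation, rather than indexing through _gens. A toy standalone version of the same policy, with Generation reduced to a hypothetical two-method interface:

    #include <cstddef>

    // Hypothetical minimal interface for illustration only; not HotSpot's class.
    struct Generation {
      virtual bool  should_allocate(size_t size, bool is_tlab) const = 0;
      virtual void* allocate(size_t size, bool is_tlab) = 0;
      virtual ~Generation() {}
    };

    void* attempt_allocation(Generation& young, Generation& old_gen,
                             size_t size, bool is_tlab, bool first_only) {
      void* res = nullptr;
      if (young.should_allocate(size, is_tlab)) {
        res = young.allocate(size, is_tlab);
        if (res != nullptr || first_only) {
          return res;  // success, or the caller only wanted the first generation
        }
      }
      if (old_gen.should_allocate(size, is_tlab)) {
        res = old_gen.allocate(size, is_tlab);
      }
      return res;
    }
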
[...]
   return UseConcMarkSweepGC &&
          ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
-void GenCollectedHeap::do_collection(bool  full,
+void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
+                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
+  // Timer for individual generations. Last argument is false: no CR
+  // FIXME: We should try to start the timing earlier to cover more of the GC pause
+  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+  // so we can assume here that the next GC id is what we want.
+  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
+  TraceCollectorStats tcs(gen->counters());
+  TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
+
+  size_t prev_used = gen->used();
+  gen->stat_record()->invocations++;
+  gen->stat_record()->accumulated_time.start();
+
+  // Must be done anew before each collection because
+  // a previous collection will do mangling and will
+  // change top of some spaces.
+  record_gen_tops_before_GC();
+
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
+                        gen->level(),
+                        gen->stat_record()->invocations,
+                        size * HeapWordSize);
+  }
+
+  if (run_verification && VerifyBeforeGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyBeforeGC:");
+  }
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+  // Do collection work
+  {
+    // Note on ref discovery: For what appear to be historical reasons,
+    // GCH enables and disabled (by enqueing) refs discovery.
+    // In the future this should be moved into the generation's
+    // collect method so that ref discovery and enqueueing concerns
+    // are local to a generation. The collect method could return
+    // an appropriate indication in the case that notification on
+    // the ref lock was needed. This will make the treatment of
+    // weak refs more uniform (and indeed remove such concerns
+    // from GCH). XXX
+
+    HandleMark hm;  // Discard invalid handles created during gc
+    save_marks();   // save marks for all gens
+    // We want to discover references, but not process them yet.
+    // This mode is disabled in process_discovered_references if the
+    // generation does some collection work, or in
+    // enqueue_discovered_references if the generation returns
+    // without doing any work.
+    ReferenceProcessor* rp = gen->ref_processor();
+    // If the discovery of ("weak") refs in this generation is
+    // atomic wrt other collectors in this configuration, we
+    // are guaranteed to have empty discovered ref lists.
+    if (rp->discovery_is_atomic()) {
+      rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+      rp->setup_policy(clear_soft_refs);
+    } else {
+      // collect() below will enable discovery as appropriate
+    }
+    gen->collect(full, clear_soft_refs, size, is_tlab);
+    if (!rp->enqueuing_is_done()) {
+      rp->enqueue_discovered_references();
+    } else {
+      rp->set_enqueuing_is_done(false);
+    }
+    rp->verify_no_references_recorded();
+  }
+
+  // Determine if allocation request was met.
+  if (size > 0) {
+    if (!is_tlab || gen->supports_tlab_allocation()) {
+      if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
+        size = 0;
+      }
+    }
+  }
+
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+  gen->stat_record()->accumulated_time.stop();
+
+  update_gc_stats(gen->level(), full);
+
+  if (run_verification && VerifyAfterGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyAfterGC:");
+  }
+
+  if (PrintGCDetails) {
+    gclog_or_tty->print(":");
+    gen->print_heap_change(prev_used);
+  }
+}
+
+void GenCollectedHeap::do_collection(bool   full,
                                      bool   clear_all_soft_refs,
                                      size_t size,
                                      bool   is_tlab,
                                      int    max_level) {
-  bool prepared_for_verification = false;
   ResourceMark rm;
   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(my_thread->is_VM_thread() ||
[...]
 
     gc_prologue(complete);
     increment_total_collections(complete);
 
     size_t gch_prev_used = used();
-
-    int starting_level = 0;
-    if (full) {
-      // Search for the oldest generation which will collect all younger
-      // generations, and start collection loop there.
-      for (int i = max_level; i >= 0; i--) {
-        if (_gens[i]->full_collects_younger_generations()) {
-          starting_level = i;
-          break;
+    bool must_restore_marks_for_biased_locking = false;
+    bool run_verification = total_collections() >= VerifyGCStartAt;
+
+    if (_young_gen->performs_in_place_marking() ||
+        _old_gen->performs_in_place_marking()) {
+      // We want to avoid doing this for
+      // scavenge-only collections where it's unnecessary.
+      must_restore_marks_for_biased_locking = true;
+      BiasedLocking::preserve_marks();
+    }
+
+    bool prepared_for_verification = false;
+    int max_level_collected = 0;
+    if (!(full && _old_gen->full_collects_younger_generations()) &&
+        _young_gen->should_collect(full, size, is_tlab)) {
+      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
+        prepare_for_verify();
+        prepared_for_verification = true;
+      }
+      collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
+    }
+    if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
+      if (!complete) {
+        // The full_collections increment was missed above.
+        increment_total_full_collections();
+      }
+      pre_full_gc_dump(NULL);    // do any pre full gc dumps
+      if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
+        if (!prepared_for_verification) {
+          prepare_for_verify();
         }
       }
-    }
-
-    bool must_restore_marks_for_biased_locking = false;
-
-    int max_level_collected = starting_level;
-    for (int i = starting_level; i <= max_level; i++) {
-      if (_gens[i]->should_collect(full, size, is_tlab)) {
-        if (i == n_gens() - 1) {  // a major collection is to happen
-          if (!complete) {
-            // The full_collections increment was missed above.
-            increment_total_full_collections();
-          }
-          pre_full_gc_dump(NULL);    // do any pre full gc dumps
-        }
-        // Timer for individual generations. Last argument is false: no CR
-        // FIXME: We should try to start the timing earlier to cover more of the GC pause
-        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
-        // so we can assume here that the next GC id is what we want.
-        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
-        TraceCollectorStats tcs(_gens[i]->counters());
-        TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
-
-        size_t prev_used = _gens[i]->used();
-        _gens[i]->stat_record()->invocations++;
-        _gens[i]->stat_record()->accumulated_time.start();
-
-        // Must be done anew before each collection because
-        // a previous collection will do mangling and will
-        // change top of some spaces.
-        record_gen_tops_before_GC();
-
-        if (PrintGC && Verbose) {
-          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
-                     i,
-                     _gens[i]->stat_record()->invocations,
-                     size*HeapWordSize);
-        }
-
-        if (VerifyBeforeGC && i >= VerifyGCLevel &&
-            total_collections() >= VerifyGCStartAt) {
-          HandleMark hm;  // Discard invalid handles created during verification
-          if (!prepared_for_verification) {
-            prepare_for_verify();
-            prepared_for_verification = true;
-          }
-          Universe::verify(" VerifyBeforeGC:");
-        }
-        COMPILER2_PRESENT(DerivedPointerTable::clear());
-
-        if (!must_restore_marks_for_biased_locking &&
-            _gens[i]->performs_in_place_marking()) {
-          // We perform this mark word preservation work lazily
-          // because it's only at this point that we know whether we
-          // absolutely have to do it; we want to avoid doing it for
-          // scavenge-only collections where it's unnecessary
-          must_restore_marks_for_biased_locking = true;
-          BiasedLocking::preserve_marks();
-        }
-
-        // Do collection work
-        {
-          // Note on ref discovery: For what appear to be historical reasons,
-          // GCH enables and disabled (by enqueing) refs discovery.
-          // In the future this should be moved into the generation's
-          // collect method so that ref discovery and enqueueing concerns
-          // are local to a generation. The collect method could return
-          // an appropriate indication in the case that notification on
-          // the ref lock was needed. This will make the treatment of
-          // weak refs more uniform (and indeed remove such concerns
-          // from GCH). XXX
-
-          HandleMark hm;  // Discard invalid handles created during gc
-          save_marks();   // save marks for all gens
-          // We want to discover references, but not process them yet.
-          // This mode is disabled in process_discovered_references if the
-          // generation does some collection work, or in
-          // enqueue_discovered_references if the generation returns
-          // without doing any work.
-          ReferenceProcessor* rp = _gens[i]->ref_processor();
-          // If the discovery of ("weak") refs in this generation is
-          // atomic wrt other collectors in this configuration, we
-          // are guaranteed to have empty discovered ref lists.
-          if (rp->discovery_is_atomic()) {
-            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
-            rp->setup_policy(do_clear_all_soft_refs);
-          } else {
-            // collect() below will enable discovery as appropriate
-          }
-          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
-          if (!rp->enqueuing_is_done()) {
-            rp->enqueue_discovered_references();
-          } else {
-            rp->set_enqueuing_is_done(false);
-          }
-          rp->verify_no_references_recorded();
-        }
-        max_level_collected = i;
-
-        // Determine if allocation request was met.
-        if (size > 0) {
-          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
-            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
-              size = 0;
-            }
-          }
-        }
-
-        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-
-        _gens[i]->stat_record()->accumulated_time.stop();
-
-        update_gc_stats(i, full);
-
-        if (VerifyAfterGC && i >= VerifyGCLevel &&
-            total_collections() >= VerifyGCStartAt) {
-          HandleMark hm;  // Discard invalid handles created during verification
-          Universe::verify(" VerifyAfterGC:");
-        }
-
-        if (PrintGCDetails) {
-          gclog_or_tty->print(":");
-          _gens[i]->print_heap_change(prev_used);
-        }
-      }
+      collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+      max_level_collected = 1;
     }
 
     // Update "complete" boolean wrt what actually transpired --
     // for instance, a promotion failure could have led to
     // a whole heap collection.
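
The per-generation work that previously lived inside do_collection's loop is factored into collect_generation above, so do_collection reduces to at most two calls. A compilable toy skeleton of the resulting control flow; every type and field here is a stand-in, only the shape follows the patch:

    #include <cstdio>

    struct Gen {
      const char* name;
      bool should_collect;
      bool full_collects_younger;  // old gen: a full collection also covers young
    };

    static void collect_generation(const Gen& g, bool full) {
      std::printf("collecting %s (full=%d)\n", g.name, full ? 1 : 0);
    }

    // max_level: 0 = young only, 1 = young and old (the two-generation encoding).
    static void do_collection(const Gen& young, const Gen& old_gen,
                              bool full, int max_level) {
      int max_level_collected = 0;
      // Skip young when a full old-gen collection will collect it anyway.
      if (!(full && old_gen.full_collects_younger) && young.should_collect) {
        collect_generation(young, full);
      }
      if (max_level == 1 && old_gen.should_collect) {
        collect_generation(old_gen, full);
        max_level_collected = 1;
      }
      std::printf("max level collected: %d\n", max_level_collected);
    }
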
[...]
       if (complete) {
         MetaspaceAux::print_metaspace_change(metadata_prev_used);
       }
     }
 
-    for (int j = max_level_collected; j >= 0; j -= 1) {
-      // Adjust generation sizes.
-      _gens[j]->compute_new_size();
+    // Adjust generation sizes.
+    if (max_level_collected == 1) {
+      _old_gen->compute_new_size();
     }
+    _young_gen->compute_new_size();
 
     if (complete) {
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
       MetaspaceAux::verify_metrics();
[...]
                             cld_closure, weak_cld_closure,
                             code_closure);
 
   if (younger_gens_as_roots) {
     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-      for (int i = 0; i < level; i++) {
-        not_older_gens->set_generation(_gens[i]);
-        _gens[i]->oop_iterate(not_older_gens);
+      if (level == 1) {
+        not_older_gens->set_generation(_young_gen);
+        _young_gen->oop_iterate(not_older_gens);
       }
       not_older_gens->reset_generation();
     }
   }
   // When collection is parallel, all threads get to cooperate to do
   // older-gen scanning.
-  for (int i = level+1; i < _n_gens; i++) {
-    older_gens->set_generation(_gens[i]);
-    rem_set()->younger_refs_iterate(_gens[i], older_gens);
+  if (level == 0) {
+    older_gens->set_generation(_old_gen);
+    rem_set()->younger_refs_iterate(_old_gen, older_gens);
     older_gens->reset_generation();
   }
 
   _gen_process_roots_tasks->all_tasks_completed();
 }
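
With exactly two generations, the root-processing loops degenerate: for (i = 0; i < level; i++) visits the young generation only when level == 1, and for (i = level+1; i < _n_gens; i++) visits the old generation only when level == 0, which is why both loops become single if statements. A small self-checking illustration of that equivalence (illustrative only):

    #include <cassert>

    // For a two-generation heap, index 0 is young and index 1 is old.
    int count_younger_than(int level) {   // old form: for (i = 0; i < level; i++)
      return (level == 1) ? 1 : 0;        // new form: if (level == 1) visit young
    }

    int count_older_than(int level) {     // old form: for (i = level+1; i < 2; i++)
      return (level == 0) ? 1 : 0;        // new form: if (level == 0) visit old
    }

    int main() {
      for (int level = 0; level <= 1; ++level) {
        int younger = 0, older = 0;
        for (int i = 0; i < level; ++i)     ++younger;  // original loop
        for (int i = level + 1; i < 2; ++i) ++older;    // original loop
        assert(younger == count_younger_than(level));
        assert(older == count_older_than(level));
      }
      return 0;
    }
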
[...]
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
   SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor()->weak_oops_do(root_closure);
-  }
+  _young_gen->ref_processor()->weak_oops_do(root_closure);
+  _old_gen->ref_processor()->weak_oops_do(root_closure);
 }
 
 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 void GenCollectedHeap::                                                 \
 oop_since_save_marks_iterate(int level,                                 \
                              OopClosureType* cur,                       \
                              OopClosureType* older) {                   \
-  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
-  for (int i = level+1; i < n_gens(); i++) {                            \
-    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
+  if (level == 0) {                                                     \
+    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
+  } else {                                                              \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
   }                                                                     \
 }
 
 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 
 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
-  for (int i = level; i < _n_gens; i++) {
-    if (!_gens[i]->no_allocs_since_save_marks()) return false;
+  if (level == 0) {
+    if (!_young_gen->no_allocs_since_save_marks()) return false;
   }
+  if (!_old_gen->no_allocs_since_save_marks()) return false;
   return true;
 }
 
 bool GenCollectedHeap::supports_inline_contig_alloc() const {
-  return _gens[0]->supports_inline_contig_alloc();
+  return _young_gen->supports_inline_contig_alloc();
 }
 
 HeapWord** GenCollectedHeap::top_addr() const {
-  return _gens[0]->top_addr();
+  return _young_gen->top_addr();
 }
 
 HeapWord** GenCollectedHeap::end_addr() const {
-  return _gens[0]->end_addr();
+  return _young_gen->end_addr();
 }
 
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
[...]
 }
 
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
-  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
+  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
   CMSCollector* collector = new CMSCollector(
-    (ConcurrentMarkSweepGeneration*)_gens[1],
+    (ConcurrentMarkSweepGeneration*)_old_gen,
     _rem_set->as_CardTableRS(),
     (ConcurrentMarkSweepPolicy*) collector_policy());
 
   if (collector == NULL || !collector->completed_initialization()) {
     if (collector) {
[...]
                   n_gens() - 1         /* max_level */);
   }
 }
 
 bool GenCollectedHeap::is_in_young(oop p) {
-  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
-  assert(result == _gens[0]->is_in_reserved(p),
+  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
+  assert(result == _young_gen->is_in_reserved(p),
          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
   return result;
 }
 
 // Returns "TRUE" iff "p" points into the committed areas of the heap.
[...]
     VMError::fatal_error_in_progress(), "too expensive");
 
   #endif
   // This might be sped up with a cache of the last generation that
   // answered yes.
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in(p)) return true;
+  if (_young_gen->is_in(p) || _old_gen->is_in(p)) {
+    return true;
   }
   // Otherwise...
   return false;
 }
 
[...]
 // Don't implement this by using is_in_young().  This method is used
 // in some cases to check that is_in_young() is correct.
 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
   assert(is_in_reserved(p) || p == NULL,
     "Does not work if address is non-null and outside of the heap");
-  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
+  return p < _young_gen->reserved().end() && p != NULL;
 }
 #endif
 
 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->oop_iterate(cl);
-  }
+  _young_gen->oop_iterate(cl);
+  _old_gen->oop_iterate(cl);
 }
 
 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->object_iterate(cl);
-  }
+  _young_gen->object_iterate(cl);
+  _old_gen->object_iterate(cl);
 }
 
 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->safe_object_iterate(cl);
-  }
+  _young_gen->safe_object_iterate(cl);
+  _old_gen->safe_object_iterate(cl);
 }
 
 Space* GenCollectedHeap::space_containing(const void* addr) const {
-  for (int i = 0; i < _n_gens; i++) {
-    Space* res = _gens[i]->space_containing(addr);
-    if (res != NULL) return res;
-  }
-  // Otherwise...
-  assert(false, "Could not find containing space");
-  return NULL;
+  Space* res = _young_gen->space_containing(addr);
+  if (res != NULL) {
+    return res;
+  }
+  res = _old_gen->space_containing(addr);
+  assert(res != NULL, "Could not find containing space");
+  return res;
 }
-
 
 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
   assert(is_in_reserved(addr), "block_start of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_start(addr);
-    }
-  }
-  assert(false, "Some generation should contain the address");
-  return NULL;
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_start(addr);
+  }
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_start(addr);
 }
 
 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_size of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_size(addr);
-    }
-  }
-  assert(false, "Some generation should contain the address");
-  return 0;
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_size(addr);
+  }
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_size(addr);
 }
 
 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
   assert(block_start(addr) == addr, "addr must be a block start");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      return _gens[i]->block_is_obj(addr);
-    }
-  }
-  assert(false, "Some generation should contain the address");
-  return false;
+  if (_young_gen->is_in_reserved(addr)) {
+    return _young_gen->block_is_obj(addr);
+  }
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  return _old_gen->block_is_obj(addr);
 }
 
 bool GenCollectedHeap::supports_tlab_allocation() const {
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      return true;
-    }
-  }
-  return false;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  return _young_gen->supports_tlab_allocation();
 }
 
 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_capacity();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_capacity();
+  }
+  return 0;
 }
 
 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_used();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_used();
+  }
+  return 0;
 }
 
 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->unsafe_max_tlab_alloc();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->unsafe_max_tlab_alloc();
+  }
+  return 0;
 }
 
 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
   bool gc_overhead_limit_was_exceeded;
   return collector_policy()->mem_allocate_work(size /* size */,
[...]
 }
 
 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                size_t max_alloc_words) {
   ScratchBlock* res = NULL;
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
-  }
+  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
+  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
   sort_scratch_list(res);
   return res;
 }
 
 void GenCollectedHeap::release_scratch() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->reset_scratch();
-  }
+  _young_gen->reset_scratch();
+  _old_gen->reset_scratch();
 }
 
 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
   void do_generation(Generation* gen) {
     gen->prepare_for_verify();
[...]
   ensure_parsability(false);        // no need to retire TLABs
   GenPrepareForVerifyClosure blk;
   generation_iterate(&blk, false);
 }
 
-
 void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                           bool old_to_young) {
   if (old_to_young) {
-    for (int i = _n_gens-1; i >= 0; i--) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_old_gen);
+    cl->do_generation(_young_gen);
   } else {
-    for (int i = 0; i < _n_gens; i++) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_young_gen);
+    cl->do_generation(_old_gen);
   }
 }
 
 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->space_iterate(cl, true);
-  }
+  _young_gen->space_iterate(cl, true);
+  _old_gen->space_iterate(cl, true);
 }
 
 bool GenCollectedHeap::is_maximal_no_gc() const {
-  for (int i = 0; i < _n_gens; i++) {
-    if (!_gens[i]->is_maximal_no_gc()) {
-      return false;
-    }
-  }
-  return true;
+  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
 }
 
 void GenCollectedHeap::save_marks() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->save_marks();
-  }
+  _young_gen->save_marks();
+  _old_gen->save_marks();
 }
 
 GenCollectedHeap* GenCollectedHeap::heap() {
   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
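
The following hunk unrolls the remaining per-generation loops the same way generation_iterate above does; the old_to_young flag still selects the visiting order. A simplified sketch of that unrolled iteration, with GenClosure replaced by std::function (an assumption for illustration; the real closure carries Generation*). Note also that the pre-existing guarantee(_n_gens = 2, ...) visible below uses '=' where '==' was presumably intended; it appears unchanged on both sides of this changeset.

    #include <functional>
    #include <cstdio>

    // Stand-in for GenClosure::do_generation; the real code takes Generation*.
    using GenClosure = std::function<void(const char*)>;

    void generation_iterate(const GenClosure& cl, bool old_to_young) {
      if (old_to_young) {
        cl("old");
        cl("young");
      } else {
        cl("young");
        cl("old");
      }
    }

    int main() {
      generation_iterate([](const char* g) { std::printf("%s\n", g); }, true);
      return 0;
    }
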
[...]
 }
 
 
 void GenCollectedHeap::prepare_for_compaction() {
   guarantee(_n_gens = 2, "Wrong number of generations");
-  Generation* old_gen = _gens[1];
+  Generation* old_gen = _old_gen;
   // Start by compacting into same gen.
   CompactPoint cp(old_gen);
   old_gen->prepare_for_compaction(&cp);
-  Generation* young_gen = _gens[0];
+  Generation* young_gen = _young_gen;
   young_gen->prepare_for_compaction(&cp);
 }
 
 GCStats* GenCollectedHeap::gc_stats(int level) const {
-  return _gens[level]->gc_stats();
+  if (level == 0) {
+    return _young_gen->gc_stats();
+  } else {
+    return _old_gen->gc_stats();
+  }
 }
 
 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-  for (int i = _n_gens-1; i >= 0; i--) {
-    Generation* g = _gens[i];
-    if (!silent) {
-      gclog_or_tty->print("%s", g->name());
-      gclog_or_tty->print(" ");
-    }
-    g->verify();
-  }
+  if (!silent) {
+    gclog_or_tty->print("%s", _old_gen->name());
+    gclog_or_tty->print(" ");
+  }
+  _old_gen->verify();
+
+  if (!silent) {
+    gclog_or_tty->print("%s", _young_gen->name());
+    gclog_or_tty->print(" ");
+  }
+  _young_gen->verify();
+
   if (!silent) {
     gclog_or_tty->print("remset ");
   }
   rem_set()->verify();
 }
 
 void GenCollectedHeap::print_on(outputStream* st) const {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->print_on(st);
-  }
+  _young_gen->print_on(st);
+  _old_gen->print_on(st);
   MetaspaceAux::print_on(st);
 }
 
 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   if (workers() != NULL) {
[...]