hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
changeset 25490 59f226da8d81
parent 23540 06f7d6e1f654
child 25491 70fb742e40aa
diff -r feb54edc509d -r 59f226da8d81 hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -193,67 +193,107 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
+#if INCLUDE_ALL_GCS
+  if (UseConcMarkSweepGC || UseG1GC) {
+    if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
+      MetaspaceGC::set_should_concurrent_collect(true);
+    } else if (UseG1GC) {
+      G1CollectedHeap* g1h = G1CollectedHeap::heap();
+      g1h->g1_policy()->set_initiate_conc_mark_if_possible();
+
+      GCCauseSetter x(g1h, _gc_cause);
+
+      // At this point we are supposed to start a concurrent cycle. We
+      // will do so if one is not already in progress.
+      bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+      if (should_start) {
+        double pause_target = g1h->g1_policy()->max_pause_time_ms();
+        g1h->do_collection_pause_at_safepoint(pause_target);
+      }
+    }
+
+    return true;
+  }
+#endif
+  return false;
+}
+
+static void log_metaspace_alloc_failure_for_concurrent_GC() {
+  if (Verbose && PrintGCDetails) {
+    if (UseConcMarkSweepGC) {
+      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+    } else if (UseG1GC) {
+      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
+    }
+  }
+}
+
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   CollectedHeap* heap = Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
 
   // Check again if the space is available.  Another thread
   // may have similarly failed a metadata allocation and induced
   // a GC that freed space for the allocation.
   if (!MetadataAllocationFailALot) {
     _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-  }
-
-  if (_result == NULL) {
-    if (UseConcMarkSweepGC) {
-      if (CMSClassUnloadingEnabled) {
-        MetaspaceGC::set_should_concurrent_collect(true);
-      }
-      // For CMS expand since the collection is going to be concurrent.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // Don't clear the soft refs yet.
-      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
-        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
-      }
-      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-      // After a GC try to allocate without expanding.  Could fail
-      // and expansion will be tried below.
-      _result =
-        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // If still failing, allow the Metaspace to expand.
-      // See delta_capacity_until_GC() for explanation of the
-      // amount of the expansion.
-      // This should work unless there really is no more space
-      // or a MaxMetaspaceSize has been specified on the command line.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-      if (_result == NULL) {
-        // If expansion failed, do a last-ditch collection and try allocating
-        // again.  A last-ditch collection will clear softrefs.  This
-        // behavior is similar to the last-ditch collection done for perm
-        // gen when it was full and a collection for failed allocation
-        // did not free perm gen space.
-        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
-        _result =
-          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-      }
-    }
-    if (Verbose && PrintGCDetails && _result == NULL) {
-      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
-                             SIZE_FORMAT, _size);
-    }
-  }
-
-  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+    if (_result != NULL) {
+      return;
+    }
+  }
+
+  if (initiate_concurrent_GC()) {
+    // For CMS and G1 expand since the collection is going to be concurrent.
+    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+    if (_result != NULL) {
+      return;
+    }
+
+    log_metaspace_alloc_failure_for_concurrent_GC();
+  }
+
+  // Don't clear the soft refs yet.
+  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+  // After a GC try to allocate without expanding.  Could fail
+  // and expansion will be tried below.
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If still failing, allow the Metaspace to expand.
+  // See delta_capacity_until_GC() for explanation of the
+  // amount of the expansion.
+  // This should work unless there really is no more space
+  // or a MaxMetaspaceSize has been specified on the command line.
+  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If expansion failed, do a last-ditch collection and try allocating
+  // again.  A last-ditch collection will clear softrefs.  This
+  // behavior is similar to the last-ditch collection done for perm
+  // gen when it was full and a collection for failed allocation
+  // did not free perm gen space.
+  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  if (Verbose && PrintGCDetails) {
+    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
+                           SIZE_FORMAT, _size);
+  }
+
+  if (GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
+}
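
For readers skimming the change: the rewrite flattens doit()'s nested "if (_result == NULL)" checks into a linear chain of attempts that each return early on success, and factors the CMS/G1 concurrent-cycle start out into initiate_concurrent_GC(). Below is a minimal standalone sketch of that control-flow shape only; the names (try_allocate, expand_and_try_allocate, run_full_gc) are hypothetical stand-ins, not HotSpot APIs.

// Standalone sketch (hypothetical names, not HotSpot code) of the
// early-return fallback chain used by the refactored doit() above:
// try, collect, retry, expand, last-ditch collect, final retry.
#include <cstdio>

static bool space_available = false;

// Stand-ins for the metaspace allocation entry points.
static void* try_allocate()            { return space_available ? &space_available : nullptr; }
static void* expand_and_try_allocate() { space_available = true; return try_allocate(); }
static void  run_full_gc(bool clear_soft_refs) {
  // Pretend only a soft-ref-clearing collection frees metadata space.
  if (clear_soft_refs) space_available = true;
}

static void* allocate_with_fallbacks() {
  if (void* p = try_allocate()) return p;             // fast path: space already free
  run_full_gc(false);                                 // don't clear the soft refs yet
  if (void* p = try_allocate()) return p;             // retry without expanding
  if (void* p = expand_and_try_allocate()) return p;  // then allow the space to grow
  run_full_gc(true);                                  // last ditch: clear soft refs
  return try_allocate();                              // may still fail (NULL)
}

int main() {
  std::printf("allocated at %p\n", allocate_with_fallbacks());
  return 0;
}

The early-return form makes each recovery step independent, which is what lets the changeset reuse the same tail (full GC, retry, expand, last-ditch GC) for CMS, G1, and the non-concurrent collectors alike.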