hotspot/src/share/vm/gc/shared/vmGCOperations.cpp
changeset 30764 fec48bf5a827
parent 30171 a7606ea92e05
child 31331 a7c714b6cfb3
       
/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
       
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS
       
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}
       
// The same dtrace probe can't be inserted in two different files, so we
// have to call it here so that it's only in one file. We can't create new
// probes for the other file anymore; the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
       
void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}
       
// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}
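
// Illustrative sketch of the requesting side of this protocol. The real call
// sites live in the collector and policy code, not in this file, and the
// constructor arguments shown below are an assumption for illustration only.
// The requesting Java thread samples the collection counter before submitting
// the operation, so that skip_operation() can detect that another thread's
// request has already been serviced in the meantime:
//
//   uint gc_count_before;
//   {
//     MutexLocker ml(Heap_lock);   // sample the counter under the Heap_lock
//     gc_count_before = Universe::heap()->total_collections();
//   }
//   VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
//   VMThread::execute(&op);        // drives doit_prologue()/doit()/doit_epilogue()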
       
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC, VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}
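
// Note: doit_prologue() and doit_epilogue() run in the requesting Java thread,
// while doit() itself runs in the VM thread at a safepoint. The two locks
// taken in the prologue (pending list lock, then Heap_lock) are held across
// the operation and released here in the reverse order of acquisition.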
       
bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GC_locker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt a collection.
      // The latter point is subtle: even a failed GC attempt will induce one
      // in the future, which we want to avoid here because the GC we might be
      // about to attempt is only of value to us if it happens now, not at some
      // point in the eventual future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}
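
// For reference: this operation backs the class-histogram style of heap
// inspection (for example "jmap -histo" or the GC.class_histogram diagnostic
// command); requesting live objects only sets _full_gc so that a collection
// is attempted before the dump. A minimal way to drive it, with the
// constructor arguments shown here being an assumption for illustration:
//
//   VM_GC_HeapInspection op(out /* outputStream* */, true /* request_full_gc */);
//   VMThread::execute(&op);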
       
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
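
// Note on set_gc_locked() (used above and in VM_CollectForMetadataAllocation
// below): it records that the operation could not free memory because the GC
// locker was active, so the requesting thread can check gc_locked() on the
// operation afterwards and stall/retry once the critical sections have exited.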
       
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}
       
// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}
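
// Note: for CMS the flag set above only requests that the background thread
// start a concurrent collection, whereas for G1 the initial-mark pause is
// executed right here. In both cases the metadata is unloaded later, during
// the concurrent cycle, which is why the caller below still retries the
// allocation with expansion rather than waiting for that cycle to finish.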
       
static void log_metaspace_alloc_failure_for_concurrent_GC() {
  if (Verbose && PrintGCDetails) {
    if (UseConcMarkSweepGC) {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    } else if (UseG1GC) {
      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
    }
  }
}
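
// The allocation/GC retry ladder implemented below, in order: (1) retry the
// allocation in case another thread's GC already freed space, (2) if a
// concurrent collector will unload metadata, expand and allocate while that
// cycle runs, (3) run a threshold-triggered collection and retry without
// expanding, (4) expand the metaspace and retry, (5) run a last-ditch
// collection that clears soft references and retry, and finally give up,
// recording whether the GC locker blocked the collection.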
       
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1, expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_metaspace_alloc_failure_for_concurrent_GC();
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC, try to allocate without expanding.  This could fail;
  // expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again.  A last-ditch collection will clear soft refs.  This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}