src/hotspot/share/gc/g1/g1CollectedHeap.hpp
changeset 59067 f080b08daace
parent 59062 6530de931b8e
child 59115 a129f10e1b9a
comparing 59066:439a147b2c0c with 59067:f080b08daace
@@ -131,10 +131,11 @@
 
 class G1CollectedHeap : public CollectedHeap {
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
+  friend class VM_G1TryInitiateConcMark;
   friend class VMStructs;
   friend class MutatorAllocRegion;
   friend class G1FullCollector;
   friend class G1GCAllocRegion;
   friend class G1HeapVerifier;
@@ -257,19 +258,25 @@
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
   G1HRPrinter _hr_printer;
 
-  // It decides whether an explicit GC should start a concurrent cycle
-  // instead of doing a STW GC. Currently, a concurrent cycle is
-  // explicitly started if:
-  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _g1_humongous_allocation
-  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (e) cause == _wb_conc_mark
+  // Return true if an explicit GC should start a concurrent cycle instead
+  // of doing a STW full GC. A concurrent cycle should be started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
+  // (b) cause == _g1_humongous_allocation,
+  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
+  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
+  // (e) cause == _wb_conc_mark,
+  // (f) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Attempt to start a concurrent cycle with the indicated cause.
+  // precondition: should_do_concurrent_full_gc(cause)
+  bool try_collect_concurrently(GCCause::Cause cause,
+                                uint gc_counter,
+                                uint old_marking_started_before);
 
   // Return true if should upgrade to full gc after an incremental one.
   bool should_upgrade_to_full_gc(GCCause::Cause cause);
 
   // indicates whether we are in young or mixed GC mode
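The hunk above rewrites the should_do_concurrent_full_gc() comment so that every cause that can start a concurrent cycle instead of a STW full GC is listed with consistent punctuation, and it declares the new try_collect_concurrently() helper. As a reading aid, here is a minimal, self-contained sketch of the cause-based decision the comment describes; the Cause enum and the three boolean flags are stand-ins invented for this sketch, not HotSpot's GCCause constants or -XX flag globals.

// Illustrative sketch only; mirrors the (a)-(f) cases from the comment above.
#include <cstdio>

enum class Cause {
  gc_locker,
  g1_humongous_allocation,
  java_lang_system_gc,
  dcmd_gc_run,
  wb_conc_mark,
  g1_periodic_collection,
  allocation_failure
};

// Stand-ins for the -XX flags named in the comment (values chosen arbitrarily).
static bool GCLockerInvokesConcurrent     = false;
static bool ExplicitGCInvokesConcurrent   = true;
static bool G1PeriodicGCInvokesConcurrent = true;

// These causes start a concurrent cycle instead of a STW full GC;
// anything else falls through to a full collection.
static bool should_do_concurrent_full_gc(Cause cause) {
  switch (cause) {
    case Cause::gc_locker:               return GCLockerInvokesConcurrent;
    case Cause::g1_humongous_allocation: return true;
    case Cause::java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
    case Cause::dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
    case Cause::wb_conc_mark:            return true;
    case Cause::g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
    default:                             return false;
  }
}

int main() {
  std::printf("System.gc() -> concurrent cycle? %d\n",
              should_do_concurrent_full_gc(Cause::java_lang_system_gc));
  std::printf("allocation failure -> concurrent cycle? %d\n",
              should_do_concurrent_full_gc(Cause::allocation_failure));
}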
@@ -628,11 +635,11 @@
   // tighter consistency checking in the method. If concurrent is
   // false, the caller is the inner caller in the nesting (i.e., the
   // Full GC). If concurrent is true, the caller is the outer caller
   // in this nesting (i.e., the concurrent cycle). Further nesting is
   // not currently supported. The end of this call also notifies
-  // the FullGCCount_lock in case a Java thread is waiting for a full
+  // the G1OldGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
 
   uint old_marking_cycles_completed() {
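The hunk above only renames the monitor mentioned in the increment_old_marking_cycles_completed() comment from FullGCCount_lock to G1OldGCCount_lock. To make the comment's point concrete, here is a small stand-alone sketch of why the end of the call must notify: a thread that requested a concurrent cycle (e.g. System.gc() with -XX:+ExplicitGCInvokesConcurrent) blocks until the completed-cycle counter moves past the value it sampled beforehand. A std::mutex/std::condition_variable pair stands in for the HotSpot monitor; none of the names below are the real declarations.

// Illustrative sketch only; not the HotSpot implementation.
#include <condition_variable>
#include <mutex>
#include <thread>

static std::mutex              count_lock;       // stands in for G1OldGCCount_lock
static std::condition_variable count_cv;
static unsigned                cycles_completed = 0;

// GC side: called once per completed old marking cycle.
void increment_old_marking_cycles_completed() {
  std::lock_guard<std::mutex> g(count_lock);
  ++cycles_completed;
  count_cv.notify_all();   // wake threads waiting for a cycle newer than theirs
}

// Requesting side: snapshot the counter before triggering the cycle, then wait
// until at least one more cycle has completed.
void wait_for_cycle_after(unsigned completed_before) {
  std::unique_lock<std::mutex> g(count_lock);
  count_cv.wait(g, [&] { return cycles_completed > completed_before; });
}

int main() {
  unsigned before;
  { std::lock_guard<std::mutex> g(count_lock); before = cycles_completed; }
  std::thread gc([] { increment_old_marking_cycles_completed(); });
  wait_for_cycle_after(before);   // returns once the "cycle" above has completed
  gc.join();
}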
@@ -1086,14 +1093,13 @@
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
   virtual void collect(GCCause::Cause cause);
 
-  // Perform a collection of the heap with the given cause; if the VM operation
-  // fails to execute for any reason, retry only if retry_on_gc_failure is set.
+  // Perform a collection of the heap with the given cause.
   // Returns whether this collection actually executed.
-  bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure);
+  bool try_collect(GCCause::Cause cause);
 
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
   void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
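The final hunk drops the retry_on_gc_failure parameter from try_collect(), leaving only the boolean result that says whether the collection actually executed. The sketch below shows one way a caller can use that contract; the Heap type and its behavior are invented for illustration and say nothing about where any retry logic actually lives after this change.

// Illustrative sketch only; the real interface is G1CollectedHeap::try_collect.
#include <cstdio>

enum class Cause { java_lang_system_gc };

struct Heap {
  int attempts = 0;
  // Pretend the first attempt does not execute (e.g. the VM operation is
  // preempted) and the second one does.
  bool try_collect(Cause) { return ++attempts > 1; }
};

int main() {
  Heap heap;
  // With a plain bool result, any retry policy sits with the caller rather
  // than behind a retry_on_gc_failure flag on the interface.
  while (!heap.try_collect(Cause::java_lang_system_gc)) {
    std::puts("collection did not execute, retrying");
  }
  std::puts("collection executed");
}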