src/hotspot/share/gc/g1/g1CollectedHeap.cpp
changeset 59067 f080b08daace
parent 59062 6530de931b8e
child 59115 a129f10e1b9a
comparison of 59066:439a147b2c0c with 59067:f080b08daace
  2007     default:                                return is_user_requested_concurrent_full_gc(cause);
  2008   }
  2009 }
  2010 
  2011 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
deleted (2012):
  2012   if(policy()->force_upgrade_to_full()) {
inserted (2012):
  2012   if (policy()->force_upgrade_to_full()) {
  2013     return true;
  2014   } else if (should_do_concurrent_full_gc(_gc_cause)) {
  2015     return false;
  2016   } else if (has_regions_left_for_allocation()) {
  2017     return false;
  2054 
  2055   _old_marking_cycles_started++;
  2056 }
  2057 
  2058 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
deleted (2059):
  2059   MonitorLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
inserted (2059):
  2059   MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);
  2060 
  2061   // We assume that if concurrent == true, then the caller is a
  2062   // concurrent thread that was joined the Suspendible Thread
  2063   // Set. If there's ever a cheap way to check this, we should add an
  2064   // assert here.
  2094   // incorrectly see that a marking cycle is still in progress.
  2095   if (concurrent) {
  2096     _cm_thread->set_idle();
  2097   }
  2098 
deleted (2099-2103):
  2099   // This notify_all() will ensure that a thread that called
  2100   // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
  2101   // and it's waiting for a full GC to finish will be woken up. It is
  2102   // waiting in VM_G1CollectForAllocation::doit_epilogue().
  2103   FullGCCount_lock->notify_all();
inserted (2099-2101):
  2099   // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
  2100   // for a full GC to finish that their wait is over.
  2101   ml.notify_all();
  2102 }
  2103 
  2104 void G1CollectedHeap::collect(GCCause::Cause cause) {
deleted (2107):
  2107   try_collect(cause, true);
inserted (2105):
  2105   try_collect(cause);
  2106 }
  2107 
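Note on the locking change above: increment_old_marking_cycles_completed() now bumps _old_marking_cycles_completed and calls notify_all() on G1OldGCCount_lock, while the user-requested path of try_collect_concurrently() in the hunk below waits on that same monitor until the completed counter catches up. A minimal standalone sketch of that handshake, using std::mutex/std::condition_variable as stand-ins for HotSpot's MonitorLocker; the names old_gc_count_lock, cycles_completed, complete_cycle and wait_for_cycle are illustrative, not JDK identifiers:

#include <condition_variable>
#include <cstdint>
#include <mutex>

// Stand-ins for G1OldGCCount_lock and _old_marking_cycles_completed.
std::mutex old_gc_count_lock;
std::condition_variable old_gc_count_cv;
uint32_t cycles_completed = 0;

// Notifier side: roughly what increment_old_marking_cycles_completed() does.
void complete_cycle() {
  std::lock_guard<std::mutex> guard(old_gc_count_lock);
  ++cycles_completed;
  old_gc_count_cv.notify_all();  // wake any thread waiting for this cycle
}

// Waiter side: roughly what the user-requested path of
// try_collect_concurrently() does for a cycle it saw start.
void wait_for_cycle(uint32_t started_after) {
  std::unique_lock<std::mutex> lock(old_gc_count_lock);
  // Wraparound-tolerant "completed >= started_after" test, mirroring the
  // gc_counter_less_than() loop condition in the change.
  old_gc_count_cv.wait(lock, [&] {
    return (cycles_completed - started_after) <= (UINT32_MAX / 2);
  });
}

In the actual change the waiter expresses the same loop with HotSpot's monitor API: while (gc_counter_less_than(_old_marking_cycles_completed, old_marking_started_after)) ml.wait();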
deleted (2110-2184):
  2110 bool G1CollectedHeap::try_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
  2111   assert_heap_not_locked();
  2112 
  2113   bool gc_succeeded;
  2114   bool should_retry_gc;
  2115 
  2116   do {
  2117     should_retry_gc = false;
  2118 
  2119     uint gc_count_before;
  2120     uint old_marking_count_before;
  2121     uint full_gc_count_before;
  2122 
  2123     {
  2124       MutexLocker ml(Heap_lock);
  2125 
  2126       // Read the GC count while holding the Heap_lock
  2127       gc_count_before = total_collections();
  2128       full_gc_count_before = total_full_collections();
  2129       old_marking_count_before = _old_marking_cycles_started;
  2130     }
  2131 
  2132     if (should_do_concurrent_full_gc(cause)) {
  2133       // Schedule an initial-mark evacuation pause that will start a
  2134       // concurrent cycle. We're setting word_size to 0 which means that
  2135       // we are not requesting a post-GC allocation.
  2136       VM_G1CollectForAllocation op(0,     /* word_size */
  2137                                    gc_count_before,
  2138                                    cause,
  2139                                    true,  /* should_initiate_conc_mark */
  2140                                    policy()->max_pause_time_ms());
  2141       VMThread::execute(&op);
  2142       gc_succeeded = op.gc_succeeded();
  2143       if (!gc_succeeded && retry_on_gc_failure) {
  2144         if (old_marking_count_before == _old_marking_cycles_started) {
  2145           should_retry_gc = op.should_retry_gc();
  2146         } else {
  2147           // A Full GC happened while we were trying to schedule the
  2148           // concurrent cycle. No point in starting a new cycle given
  2149           // that the whole heap was collected anyway.
  2150         }
  2151 
  2152         if (should_retry_gc && GCLocker::is_active_and_needs_gc()) {
  2153           GCLocker::stall_until_clear();
  2154         }
  2155       }
  2156     } else if (GCLocker::should_discard(cause, gc_count_before)) {
  2157       // Return false to be consistent with VMOp failure due to
  2158       // another collection slipping in after our gc_count but before
  2159       // our request is processed.  _gc_locker collections upgraded by
  2160       // GCLockerInvokesConcurrent are handled above and never discarded.
  2161       return false;
  2162     } else {
  2163       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
  2164           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2165 
  2166         // Schedule a standard evacuation pause. We're setting word_size
  2167         // to 0 which means that we are not requesting a post-GC allocation.
  2168         VM_G1CollectForAllocation op(0,     /* word_size */
  2169                                      gc_count_before,
  2170                                      cause,
  2171                                      false, /* should_initiate_conc_mark */
  2172                                      policy()->max_pause_time_ms());
  2173         VMThread::execute(&op);
  2174         gc_succeeded = op.gc_succeeded();
  2175       } else {
  2176         // Schedule a Full GC.
  2177         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  2178         VMThread::execute(&op);
  2179         gc_succeeded = op.gc_succeeded();
  2180       }
  2181     }
  2182   } while (should_retry_gc);
  2183   return gc_succeeded;
  2184 }
inserted (2108-2304):
  2108 // Return true if (x < y) with allowance for wraparound.
  2109 static bool gc_counter_less_than(uint x, uint y) {
  2110   return (x - y) > (UINT_MAX/2);
  2111 }
  2112 
  2113 // LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
  2114 // Macro so msg printing is format-checked.
  2115 #define LOG_COLLECT_CONCURRENTLY(cause, ...)                            \
  2116   do {                                                                  \
  2117     LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt;                   \
  2118     if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) {                     \
  2119       ResourceMark rm; /* For thread name. */                           \
  2120       LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
  2121       LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
  2122                                        Thread::current()->name(),       \
  2123                                        GCCause::to_string(cause));      \
  2124       LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__);                    \
  2125     }                                                                   \
  2126   } while (0)
  2127 
  2128 #define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
  2129   LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
  2130 
  2131 bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
  2132                                                uint gc_counter,
  2133                                                uint old_marking_started_before) {
  2134   assert_heap_not_locked();
  2135   assert(should_do_concurrent_full_gc(cause),
  2136          "Non-concurrent cause %s", GCCause::to_string(cause));
  2137 
  2138   for (uint i = 1; true; ++i) {
  2139     // Try to schedule an initial-mark evacuation pause that will
  2140     // start a concurrent cycle.
  2141     LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
  2142     VM_G1TryInitiateConcMark op(gc_counter,
  2143                                 cause,
  2144                                 policy()->max_pause_time_ms());
  2145     VMThread::execute(&op);
  2146 
  2147     // Request is trivially finished.
  2148     if (cause == GCCause::_g1_periodic_collection) {
  2149       LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded());
  2150       return op.gc_succeeded();
  2151     }
  2152 
  2153     // Lock to get consistent set of values.
  2154     uint old_marking_started_after;
  2155     uint old_marking_completed_after;
  2156     {
  2157       MutexLocker ml(Heap_lock);
  2158       // Update gc_counter for retrying VMOp if needed. Captured here to be
  2159       // consistent with the values we use below for termination tests.  If
  2160       // a retry is needed after a possible wait, and another collection
  2161       // occurs in the meantime, it will cause our retry to be skipped and
  2162       // we'll recheck for termination with updated conditions from that
  2163       // more recent collection.  That's what we want, rather than having
  2164       // our retry possibly perform an unnecessary collection.
  2165       gc_counter = total_collections();
  2166       old_marking_started_after = _old_marking_cycles_started;
  2167       old_marking_completed_after = _old_marking_cycles_completed;
  2168     }
  2169 
  2170     if (!GCCause::is_user_requested_gc(cause)) {
  2171       // For an "automatic" (not user-requested) collection, we just need to
  2172       // ensure that progress is made.
  2173       //
  2174       // Request is finished if any of
  2175       // (1) the VMOp successfully performed a GC,
  2176       // (2) a concurrent cycle was already in progress,
  2177       // (3) a new cycle was started (by this thread or some other), or
  2178       // (4) a Full GC was performed.
  2179       // Cases (3) and (4) are detected together by a change to
  2180       // _old_marking_cycles_started.
  2181       //
  2182       // Note that (1) does not imply (3).  If we're still in the mixed
  2183       // phase of an earlier concurrent collection, the request to make the
  2184       // collection an initial-mark won't be honored.  If we don't check for
  2185       // both conditions we'll spin doing back-to-back collections.
  2186       if (op.gc_succeeded() ||
  2187           op.cycle_already_in_progress() ||
  2188           (old_marking_started_before != old_marking_started_after)) {
  2189         LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
  2190         return true;
  2191       }
  2192     } else {                    // User-requested GC.
  2193       // For a user-requested collection, we want to ensure that a complete
  2194       // full collection has been performed before returning, but without
  2195       // waiting for more than needed.
  2196 
  2197       // For user-requested GCs (unlike non-UR), a successful VMOp implies a
  2198       // new cycle was started.  That's good, because it's not clear what we
  2199       // should do otherwise.  Trying again just does back to back GCs.
  2200       // Can't wait for someone else to start a cycle.  And returning fails
  2201       // to meet the goal of ensuring a full collection was performed.
  2202       assert(!op.gc_succeeded() ||
  2203              (old_marking_started_before != old_marking_started_after),
  2204              "invariant: succeeded %s, started before %u, started after %u",
  2205              BOOL_TO_STR(op.gc_succeeded()),
  2206              old_marking_started_before, old_marking_started_after);
  2207 
  2208       // Request is finished if a full collection (concurrent or stw)
  2209       // was started after this request and has completed, e.g.
  2210       // started_before < completed_after.
  2211       if (gc_counter_less_than(old_marking_started_before,
  2212                                old_marking_completed_after)) {
  2213         LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
  2214         return true;
  2215       }
  2216 
  2217       if (old_marking_started_after != old_marking_completed_after) {
  2218         // If there is an in-progress cycle (possibly started by us), then
  2219         // wait for that cycle to complete, e.g.
  2220         // while completed_now < started_after.
  2221         LOG_COLLECT_CONCURRENTLY(cause, "wait");
  2222         MonitorLocker ml(G1OldGCCount_lock);
  2223         while (gc_counter_less_than(_old_marking_cycles_completed,
  2224                                     old_marking_started_after)) {
  2225           ml.wait();
  2226         }
  2227         // Request is finished if the collection we just waited for was
  2228         // started after this request.
  2229         if (old_marking_started_before != old_marking_started_after) {
  2230           LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
  2231           return true;
  2232         }
  2233       }
  2234 
  2235       // If VMOp was successful then it started a new cycle that the above
  2236       // wait &etc should have recognized as finishing this request.  This
  2237       // differs from a non-user-request, where gc_succeeded does not imply
  2238       // a new cycle was started.
  2239       assert(!op.gc_succeeded(), "invariant");
  2240 
  2241       // If VMOp failed because a cycle was already in progress, it is now
  2242       // complete.  But it didn't finish this user-requested GC, so try
  2243       // again.
  2244       if (op.cycle_already_in_progress()) {
  2245         LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
  2246         continue;
  2247       }
  2248     }
  2249 
  2250     // Collection failed and should be retried.
  2251     assert(op.transient_failure(), "invariant");
  2252 
  2253     // If GCLocker is active, wait until clear before retrying.
  2254     if (GCLocker::is_active_and_needs_gc()) {
  2255       LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
  2256       GCLocker::stall_until_clear();
  2257     }
  2258 
  2259     LOG_COLLECT_CONCURRENTLY(cause, "retry");
  2260   }
  2261 }
  2262 
  2263 bool G1CollectedHeap::try_collect(GCCause::Cause cause) {
  2264   assert_heap_not_locked();
  2265 
  2266   // Lock to get consistent set of values.
  2267   uint gc_count_before;
  2268   uint full_gc_count_before;
  2269   uint old_marking_started_before;
  2270   {
  2271     MutexLocker ml(Heap_lock);
  2272     gc_count_before = total_collections();
  2273     full_gc_count_before = total_full_collections();
  2274     old_marking_started_before = _old_marking_cycles_started;
  2275   }
  2276 
  2277   if (should_do_concurrent_full_gc(cause)) {
  2278     return try_collect_concurrently(cause,
  2279                                     gc_count_before,
  2280                                     old_marking_started_before);
  2281   } else if (GCLocker::should_discard(cause, gc_count_before)) {
  2282     // Indicate failure to be consistent with VMOp failure due to
  2283     // another collection slipping in after our gc_count but before
  2284     // our request is processed.  _gc_locker collections upgraded by
  2285     // GCLockerInvokesConcurrent are handled above and never discarded.
  2286     return false;
  2287   } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
  2288              DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2289 
  2290     // Schedule a standard evacuation pause. We're setting word_size
  2291     // to 0 which means that we are not requesting a post-GC allocation.
  2292     VM_G1CollectForAllocation op(0,     /* word_size */
  2293                                  gc_count_before,
  2294                                  cause,
  2295                                  policy()->max_pause_time_ms());
  2296     VMThread::execute(&op);
  2297     return op.gc_succeeded();
  2298   } else {
  2299     // Schedule a Full GC.
  2300     VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  2301     VMThread::execute(&op);
  2302     return op.gc_succeeded();
  2303   }
  2304 }
  2305 
  2306 bool G1CollectedHeap::is_in(const void* p) const {
  2307   if (_hrm->reserved().contains(p)) {
  2308     // Given that we know that p is in the reserved space,
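The inserted block above introduces gc_counter_less_than(), which treats the uint cycle counters as wrapping: x is considered "less than" y when the unsigned difference x - y falls in the upper half of the range. A small self-contained check of that behavior, with the body copied from the change and test values that are illustrative only, not taken from the JDK sources:

#include <cassert>
#include <climits>

// Mirrors the helper added in this change: true iff x < y, tolerating wraparound.
static bool gc_counter_less_than(unsigned x, unsigned y) {
  return (x - y) > (UINT_MAX / 2);
}

int main() {
  assert(gc_counter_less_than(3, 5));          // ordinary case: 3 < 5
  assert(!gc_counter_less_than(5, 3));         // 5 is not < 3
  assert(!gc_counter_less_than(7, 7));         // equal counters are not "less"
  assert(gc_counter_less_than(UINT_MAX, 2));   // counter wrapped: UINT_MAX, then 0, 1, 2
  assert(!gc_counter_less_than(2, UINT_MAX));  // ...so 2 counts as "after" UINT_MAX
  return 0;
}

This is what lets the user-requested path compare _old_marking_cycles_completed against a started counter captured earlier (the "started_before < completed_after" test in the comments) even if the counters eventually wrap.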
  2729                                                GCCause::Cause gc_cause) {
  2730   assert_heap_not_locked_and_not_at_safepoint();
  2731   VM_G1CollectForAllocation op(word_size,
  2732                                gc_count_before,
  2733                                gc_cause,
deleted (2614):
  2614                                false, /* should_initiate_conc_mark */
  2734                                policy()->max_pause_time_ms());
  2735   VMThread::execute(&op);
  2736 
  2737   HeapWord* result = op.result();
  2738   bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();