src/hotspot/share/gc/g1/g1CollectedHeap.cpp
changeset 48631 862c41cf1c7f
parent 48179 34fe70d22e9c
child 48637 7bba05746c44
comparing 48630:fdef4da95080 (old, left line numbers) with 48631:862c41cf1c7f (new, right line numbers); lines marked '-' were deleted, '+' inserted
    1      1   /*
    2            - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
           2     + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
    3      3    * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4      4    *
    5      5    * This code is free software; you can redistribute it and/or modify it
    6      6    * under the terms of the GNU General Public License version 2 only, as
    7      7    * published by the Free Software Foundation.
  443    443   
  444    444   HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  445    445     assert_heap_not_locked_and_not_at_safepoint();
  446    446     assert(!is_humongous(word_size), "we do not allow humongous TLABs");
  447    447   
  448          -   uint dummy_gc_count_before;
  449          -   uint dummy_gclocker_retry_count = 0;
  450          -   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
         448   +   return attempt_allocation(word_size);
  451    449   }
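
The hunk above deletes the dummy locals that allocate_new_tlab() declared only to satisfy the old attempt_allocation() signature. A minimal standalone sketch of the same cleanup (plain C++ for illustration; reserve_old, reserve_new and the callers are invented names, not JDK code):

    #include <cstddef>

    // Old-style API: bookkeeping the caller never reads still has to be
    // received through out-parameters.
    static void* reserve_old(std::size_t words, unsigned* gc_count_out, unsigned* retry_count_out) {
      (void)words;
      *gc_count_out = 0;
      *retry_count_out = 0;
      return nullptr;
    }

    // New-style API: the bookkeeping stays inside the callee.
    static void* reserve_new(std::size_t words) {
      (void)words;
      return nullptr;
    }

    static void* caller_old(std::size_t words) {
      unsigned dummy_gc_count;           // exists only to satisfy the signature
      unsigned dummy_retry_count = 0;    // likewise
      return reserve_old(words, &dummy_gc_count, &dummy_retry_count);
    }

    static void* caller_new(std::size_t words) {
      return reserve_new(words);         // the call now says exactly what it does
    }

    int main() {
      return (caller_old(8) == caller_new(8)) ? 0 : 1;
    }
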
  452    450   
  453    451   HeapWord*
  454    452   G1CollectedHeap::mem_allocate(size_t word_size,
  455    453                                 bool*  gc_overhead_limit_was_exceeded) {
  456    454     assert_heap_not_locked_and_not_at_safepoint();
  457    455   
  458          -   // Loop until the allocation is satisfied, or unsatisfied after GC.
  459          -   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
  460          -     uint gc_count_before;
  461          - 
  462          -     HeapWord* result = NULL;
  463          -     if (!is_humongous(word_size)) {
  464          -       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
  465          -     } else {
  466          -       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
  467          -     }
  468          -     if (result != NULL) {
  469          -       return result;
  470          -     }
  471          - 
  472          -     // Create the garbage collection operation...
  473          -     VM_G1CollectForAllocation op(gc_count_before, word_size);
  474          -     op.set_allocation_context(AllocationContext::current());
  475          - 
  476          -     // ...and get the VM thread to execute it.
  477          -     VMThread::execute(&op);
  478          - 
  479          -     if (op.prologue_succeeded() && op.pause_succeeded()) {
  480          -       // If the operation was successful we'll return the result even
  481          -       // if it is NULL. If the allocation attempt failed immediately
  482          -       // after a Full GC, it's unlikely we'll be able to allocate now.
  483          -       HeapWord* result = op.result();
  484          -       if (result != NULL && !is_humongous(word_size)) {
  485          -         // Allocations that take place on VM operations do not do any
  486          -         // card dirtying and we have to do it here. We only have to do
  487          -         // this for non-humongous allocations, though.
  488          -         dirty_young_block(result, word_size);
  489          -       }
  490          -       return result;
  491          -     } else {
  492          -       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
  493          -         return NULL;
  494          -       }
  495          -       assert(op.result() == NULL,
  496          -              "the result should be NULL if the VM op did not succeed");
  497          -     }
  498          - 
  499          -     // Give a warning if we seem to be looping forever.
  500          -     if ((QueuedAllocationWarningCount > 0) &&
  501          -         (try_count % QueuedAllocationWarningCount == 0)) {
  502          -       log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
  503          -     }
  504          -   }
  505          - 
  506          -   ShouldNotReachHere();
  507          -   return NULL;
         456   +   if (is_humongous(word_size)) {
         457   +     return attempt_allocation_humongous(word_size);
         458   +   }
         459   +   return attempt_allocation(word_size);
  508    460   }
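
After this hunk, mem_allocate() no longer owns a collect-and-retry loop; it only classifies the request with is_humongous() and delegates, and the two attempt_allocation* helpers drive retries and GC scheduling internally. A rough standalone sketch of that shape (plain C++ for illustration only; allocate_small, allocate_huge and the retry bound are made-up stand-ins, not the JDK code):

    #include <cstddef>
    #include <cstdio>

    namespace sketch {

    // Stand-ins for the two size-classed slow paths. Each owns its own retry
    // policy, so the dispatcher below stays a one-liner per case.
    void* allocate_small(std::size_t words) {
      for (unsigned try_count = 1; try_count <= 3; ++try_count) {
        // ... attempt, maybe schedule a collection, retry ...
        std::printf("small path, attempt %u for %zu words\n", try_count, words);
      }
      return nullptr;  // unsatisfied after the retries
    }

    void* allocate_huge(std::size_t words) {
      std::printf("huge path for %zu words\n", words);
      return nullptr;
    }

    // The dispatcher mirrors the new mem_allocate(): classify, then delegate.
    void* mem_allocate(std::size_t words, std::size_t huge_threshold) {
      if (words >= huge_threshold) {
        return allocate_huge(words);
      }
      return allocate_small(words);
    }

    }  // namespace sketch

    int main() {
      sketch::mem_allocate(16, 1024);    // small path
      sketch::mem_allocate(4096, 1024);  // huge path
      return 0;
    }
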
  509    461   
  510    462   HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
  511          -                                                    AllocationContext_t context,
  512          -                                                    uint* gc_count_before_ret,
  513          -                                                    uint* gclocker_retry_count_ret) {
         463   +                                                    AllocationContext_t context) {
         464   +   ResourceMark rm; // For retrieving the thread names in log messages.
         465   + 
  514    466     // Make sure you read the note in attempt_allocation_humongous().
  515    467   
  516    468     assert_heap_not_locked_and_not_at_safepoint();
  517    469     assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
  518    470            "be called for humongous allocation requests");
  523    475     // We will loop until a) we manage to successfully perform the
  524    476     // allocation or b) we successfully schedule a collection which
  525    477     // fails to perform the allocation. b) is the only case when we'll
  526    478     // return NULL.
  527    479     HeapWord* result = NULL;
  528          -   for (int try_count = 1; /* we'll return */; try_count += 1) {
         480   +   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
  529    481       bool should_try_gc;
  530    482       uint gc_count_before;
  531    483   
  532    484       {
  533    485         MutexLockerEx x(Heap_lock);
  534    486         result = _allocator->attempt_allocation_locked(word_size, context);
  535    487         if (result != NULL) {
  536    488           return result;
  537    489         }
  538    490   
  539          -       if (GCLocker::is_active_and_needs_gc()) {
  540          -         if (g1_policy()->can_expand_young_list()) {
  541          -           // No need for an ergo verbose message here,
  542          -           // can_expand_young_list() does this when it returns true.
  543          -           result = _allocator->attempt_allocation_force(word_size, context);
  544          -           if (result != NULL) {
  545          -             return result;
  546          -           }
  547          -         }
  548          -         should_try_gc = false;
  549          -       } else {
  550          -         // The GCLocker may not be active but the GCLocker initiated
  551          -         // GC may not yet have been performed (GCLocker::needs_gc()
  552          -         // returns true). In this case we do not try this GC and
  553          -         // wait until the GCLocker initiated GC is performed, and
  554          -         // then retry the allocation.
  555          -         if (GCLocker::needs_gc()) {
  556          -           should_try_gc = false;
  557          -         } else {
  558          -           // Read the GC count while still holding the Heap_lock.
  559          -           gc_count_before = total_collections();
  560          -           should_try_gc = true;
         491   +       // If the GCLocker is active and we are bound for a GC, try expanding young gen.
         492   +       // This is different to when only GCLocker::needs_gc() is set: try to avoid
         493   +       // waiting because the GCLocker is active to not wait too long.
         494   +       if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
         495   +         // No need for an ergo message here, can_expand_young_list() does this when
         496   +         // it returns true.
         497   +         result = _allocator->attempt_allocation_force(word_size, context);
         498   +         if (result != NULL) {
         499   +           return result;
  561    500           }
  562    501         }
         502   +       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
         503   +       // the GCLocker initiated GC has been performed and then retry. This includes
         504   +       // the case when the GC Locker is not active but has not been performed.
         505   +       should_try_gc = !GCLocker::needs_gc();
         506   +       // Read the GC count while still holding the Heap_lock.
         507   +       gc_count_before = total_collections();
  563    508       }
  564    509   
  565    510       if (should_try_gc) {
  566    511         bool succeeded;
  567    512         result = do_collection_pause(word_size, gc_count_before, &succeeded,
  568    513                                      GCCause::_g1_inc_collection_pause);
  569    514         if (result != NULL) {
  570    515           assert(succeeded, "only way to get back a non-NULL result");
         516   +         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
         517   +                              Thread::current()->name(), p2i(result));
  571    518           return result;
  572    519         }
  573    520   
  574    521         if (succeeded) {
  575          -         // If we get here we successfully scheduled a collection which
  576          -         // failed to allocate. No point in trying to allocate
  577          -         // further. We'll just return NULL.
  578          -         MutexLockerEx x(Heap_lock);
  579          -         *gc_count_before_ret = total_collections();
         522   +         // We successfully scheduled a collection which failed to allocate. No
         523   +         // point in trying to allocate further. We'll just return NULL.
         524   +         log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
         525   +                              SIZE_FORMAT " words", Thread::current()->name(), word_size);
  580    526           return NULL;
  581    527         }
         528   +       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
         529   +                            Thread::current()->name(), word_size);
  582    530       } else {
  583          -       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
  584          -         MutexLockerEx x(Heap_lock);
  585          -         *gc_count_before_ret = total_collections();
         531   +       // Failed to schedule a collection.
         532   +       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
         533   +         log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
         534   +                                SIZE_FORMAT " words", Thread::current()->name(), word_size);
  586    535           return NULL;
  587    536         }
         537   +       log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
  588    538         // The GCLocker is either active or the GCLocker initiated
  589    539         // GC has not yet been performed. Stall until it is and
  590    540         // then retry the allocation.
  591    541         GCLocker::stall_until_clear();
  592          -       (*gclocker_retry_count_ret) += 1;
         542   +       gclocker_retry_count += 1;
  593    543       }
  594    544   
  595    545       // We can reach here if we were unsuccessful in scheduling a
  596    546       // collection (because another thread beat us to it) or if we were
  597    547       // stalled due to the GC locker. In either can we should retry the
  598    548       // allocation attempt in case another thread successfully
  599    549       // performed a collection and reclaimed enough space. We do the
  600    550       // first attempt (without holding the Heap_lock) here and the
  601    551       // follow-on attempt will be at the start of the next loop
  602    552       // iteration (after taking the Heap_lock).
         553   + 
  603    554       result = _allocator->attempt_allocation(word_size, context);
  604    555       if (result != NULL) {
  605    556         return result;
  606    557       }
  607    558   
  608    559       // Give a warning if we seem to be looping forever.
  609    560       if ((QueuedAllocationWarningCount > 0) &&
  610    561           (try_count % QueuedAllocationWarningCount == 0)) {
  611          -       log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
  612          -                       "retries %d times", try_count);
         562   +       log_warning(gc, alloc)("%s:  Retried allocation %u times for " SIZE_FORMAT " words",
         563   +                              Thread::current()->name(), try_count, word_size);
  613    564       }
  614    565     }
  615    566   
  616    567     ShouldNotReachHere();
  617    568     return NULL;
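
The replaced GCLocker branch nest boils down to one assignment. Assuming, as the HotSpot code does, that GCLocker::is_active_and_needs_gc() can only be true when GCLocker::needs_gc() is also true, the old nested if/else and the new should_try_gc = !GCLocker::needs_gc(); pick the same value. The standalone check below (plain C++, not JDK code, with the young-gen expansion attempt elided) walks the consistent input combinations:

    #include <cassert>

    // Old decision logic, reduced to the part that sets should_try_gc.
    static bool should_try_gc_old(bool active_and_needs_gc, bool needs_gc) {
      if (active_and_needs_gc) {
        return false;        // wait for the GCLocker-initiated GC instead
      } else {
        return !needs_gc;    // only schedule a GC if none is already pending
      }
    }

    // New decision logic from the hunk above.
    static bool should_try_gc_new(bool needs_gc) {
      return !needs_gc;
    }

    int main() {
      // active_and_needs_gc == true is only consistent with needs_gc == true;
      // that is the assumption that makes the simplification valid.
      const bool cases[][2] = {
        {false, false},   // locker idle, no GC pending
        {false, true},    // GCLocker-initiated GC requested, locker no longer active
        {true,  true},    // locker active and GC pending
      };
      for (const bool* c : cases) {
        assert(should_try_gc_old(c[0], c[1]) == should_try_gc_new(c[1]));
      }
      return 0;
    }
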
  828    779         increase_used(fill_size * HeapWordSize);
  829    780       }
  830    781     }
  831    782   }
  832    783   
  833          - inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
  834          -                                                      uint* gc_count_before_ret,
  835          -                                                      uint* gclocker_retry_count_ret) {
         784   + inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size) {
  836    785     assert_heap_not_locked_and_not_at_safepoint();
  837    786     assert(!is_humongous(word_size), "attempt_allocation() should not "
  838    787            "be called for humongous allocation requests");
  839    788   
  840    789     AllocationContext_t context = AllocationContext::current();
  841    790     HeapWord* result = _allocator->attempt_allocation(word_size, context);
  842    791   
  843    792     if (result == NULL) {
  844          -     result = attempt_allocation_slow(word_size,
  845          -                                      context,
  846          -                                      gc_count_before_ret,
  847          -                                      gclocker_retry_count_ret);
         793   +     result = attempt_allocation_slow(word_size, context);
  848    794     }
  849    795     assert_heap_not_locked();
  850    796     if (result != NULL) {
  851    797       dirty_young_block(result, word_size);
  852    798     }
  923    869                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
  924    870     }
  925    871     decrease_used(size_used);
  926    872   }
  927    873   
  928          - HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
  929          -                                                         uint* gc_count_before_ret,
  930          -                                                         uint* gclocker_retry_count_ret) {
         874   + HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
         875   +   ResourceMark rm; // For retrieving the thread names in log messages.
         876   + 
  931    877     // The structure of this method has a lot of similarities to
  932    878     // attempt_allocation_slow(). The reason these two were not merged
  933    879     // into a single one is that such a method would require several "if
  934    880     // allocation is not humongous do this, otherwise do that"
  935    881     // conditional paths which would obscure its flow. In fact, an early
  956    902     // We will loop until a) we manage to successfully perform the
  957    903     // allocation or b) we successfully schedule a collection which
  958    904     // fails to perform the allocation. b) is the only case when we'll
  959    905     // return NULL.
  960    906     HeapWord* result = NULL;
  961          -   for (int try_count = 1; /* we'll return */; try_count += 1) {
         907   +   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
  962    908       bool should_try_gc;
  963    909       uint gc_count_before;
         910   + 
  964    911   
  965    912       {
  966    913         MutexLockerEx x(Heap_lock);
  967    914   
  968    915         // Given that humongous objects are not allocated in young
  973    920           size_t size_in_regions = humongous_obj_size_in_regions(word_size);
  974    921           g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
  975    922           return result;
  976    923         }
  977    924   
  978          -       if (GCLocker::is_active_and_needs_gc()) {
  979          -         should_try_gc = false;
  980          -       } else {
  981          -         // The GCLocker may not be active but the GCLocker initiated
  982          -         // GC may not yet have been performed (GCLocker::needs_gc()
  983          -         // returns true). In this case we do not try this GC and
  984          -         // wait until the GCLocker initiated GC is performed, and
  985          -         // then retry the allocation.
  986          -         if (GCLocker::needs_gc()) {
  987          -           should_try_gc = false;
  988          -         } else {
  989          -           // Read the GC count while still holding the Heap_lock.
  990          -           gc_count_before = total_collections();
  991          -           should_try_gc = true;
  992          -         }
  993          -       }
         925   +       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
         926   +       // the GCLocker initiated GC has been performed and then retry. This includes
         927   +       // the case when the GC Locker is not active but has not been performed.
         928   +       should_try_gc = !GCLocker::needs_gc();
         929   +       // Read the GC count while still holding the Heap_lock.
         930   +       gc_count_before = total_collections();
  994    931       }
  995    932   
  996    933       if (should_try_gc) {
  997          -       // If we failed to allocate the humongous object, we should try to
  998          -       // do a collection pause (if we're allowed) in case it reclaims
  999          -       // enough space for the allocation to succeed after the pause.
 1000          - 
 1001    934         bool succeeded;
 1002    935         result = do_collection_pause(word_size, gc_count_before, &succeeded,
 1003    936                                      GCCause::_g1_humongous_allocation);
 1004    937         if (result != NULL) {
 1005    938           assert(succeeded, "only way to get back a non-NULL result");
         939   +         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
         940   +                              Thread::current()->name(), p2i(result));
 1006    941           return result;
 1007    942         }
 1008    943   
 1009    944         if (succeeded) {
 1010          -         // If we get here we successfully scheduled a collection which
 1011          -         // failed to allocate. No point in trying to allocate
 1012          -         // further. We'll just return NULL.
 1013          -         MutexLockerEx x(Heap_lock);
 1014          -         *gc_count_before_ret = total_collections();
         945   +         // We successfully scheduled a collection which failed to allocate. No
         946   +         // point in trying to allocate further. We'll just return NULL.
         947   +         log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
         948   +                              SIZE_FORMAT " words", Thread::current()->name(), word_size);
 1015    949           return NULL;
 1016    950         }
         951   +       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
         952   +                            Thread::current()->name(), word_size);
 1017    953       } else {
 1018          -       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 1019          -         MutexLockerEx x(Heap_lock);
 1020          -         *gc_count_before_ret = total_collections();
         954   +       // Failed to schedule a collection.
         955   +       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
         956   +         log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
         957   +                                SIZE_FORMAT " words", Thread::current()->name(), word_size);
 1021    958           return NULL;
 1022    959         }
         960   +       log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
 1023    961         // The GCLocker is either active or the GCLocker initiated
 1024    962         // GC has not yet been performed. Stall until it is and
 1025    963         // then retry the allocation.
 1026    964         GCLocker::stall_until_clear();
 1027          -       (*gclocker_retry_count_ret) += 1;
         965   +       gclocker_retry_count += 1;
 1028    966       }
         967   + 
 1029    968   
 1030    969       // We can reach here if we were unsuccessful in scheduling a
 1031    970       // collection (because another thread beat us to it) or if we were
 1032    971       // stalled due to the GC locker. In either can we should retry the
 1033    972       // allocation attempt in case another thread successfully
 1034          -     // performed a collection and reclaimed enough space.  Give a
 1035          -     // warning if we seem to be looping forever.
         973   +     // performed a collection and reclaimed enough space.
         974   +     // Humongous object allocation always needs a lock, so we wait for the retry
         975   +     // in the next iteration of the loop, unlike for the regular iteration case.
         976   +     // Give a warning if we seem to be looping forever.
 1036    977   
 1037    978       if ((QueuedAllocationWarningCount > 0) &&
 1038    979           (try_count % QueuedAllocationWarningCount == 0)) {
 1039          -       log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
 1040          -                       "retries %d times", try_count);
         980   +       log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
         981   +                              Thread::current()->name(), try_count, word_size);
 1041    982       }
 1042    983     }
 1043    984   
 1044    985     ShouldNotReachHere();
 1045    986     return NULL;
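
Both slow paths now keep the GCLocker wait budget in a loop-local gclocker_retry_count and bail out with a log_warning once it exceeds GCLockerRetryAllocationCount, instead of threading the counter through an out-parameter. A compact standalone sketch of that stall-and-retry shape (plain C++; stall_until_clear, try_allocate and the budget value are illustrative stand-ins, not the JDK implementation):

    #include <cstddef>
    #include <cstdio>

    namespace sketch {

    bool gc_locker_blocks_allocation = true;  // pretend a JNI critical section holds things up

    void stall_until_clear() {
      // The real code blocks until the GCLocker-initiated GC has run; here we
      // simply release the blockage after two stalls so the example terminates.
      static int calls = 0;
      if (++calls >= 2) {
        gc_locker_blocks_allocation = false;
      }
    }

    void* try_allocate(std::size_t words) {
      return gc_locker_blocks_allocation ? nullptr : new char[words * sizeof(void*)];
    }

    void* allocate_with_stall(std::size_t words, unsigned max_gclocker_retries) {
      for (unsigned try_count = 1, gclocker_retry_count = 0; ; ++try_count) {
        if (void* p = try_allocate(words)) {
          return p;
        }
        if (gclocker_retry_count > max_gclocker_retries) {
          std::printf("attempt %u: retried waiting for the GC locker too often, giving up\n", try_count);
          return nullptr;
        }
        stall_until_clear();
        ++gclocker_retry_count;
      }
    }

    }  // namespace sketch

    int main() {
      void* p = sketch::allocate_with_stall(64, 2);
      delete[] static_cast<char*>(p);
      return p != nullptr ? 0 : 1;
    }
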
 1337   1278     HeapWord* result =
 1338   1279       attempt_allocation_at_safepoint(word_size,
 1339   1280                                       context,
 1340   1281                                       expect_null_mutator_alloc_region);
 1341   1282     if (result != NULL) {
 1342           -     assert(*gc_succeeded, "sanity");
 1343   1283       return result;
 1344   1284     }
 1345   1285   
 1346   1286     // In a G1 heap, we're supposed to keep allocation from failing by
 1347   1287     // incremental pauses.  Therefore, at least for now, we'll favor
 1348   1288     // expansion over collection.  (This might change in the future if we can
 1349   1289     // do something smarter than full collection to satisfy a failed alloc.)
 1350   1290     result = expand_and_allocate(word_size, context);
 1351   1291     if (result != NULL) {
 1352           -     assert(*gc_succeeded, "sanity");
 1353   1292       return result;
 1354   1293     }
 1355   1294   
 1356   1295     if (do_gc) {
 1357   1296       // Expansion didn't work, we'll try to do a Full GC.
 1399   1338                                               false, /* clear_all_soft_refs */
 1400   1339                                               true,  /* expect_null_mutator_alloc_region */
 1401   1340                                               succeeded);
 1402   1341   
 1403   1342     if (result != NULL) {
 1404           -     assert(*succeeded, "sanity");
 1405   1343       return result;
 1406   1344     }
 1407   1345   
 1408   1346     assert(!collector_policy()->should_clear_all_soft_refs(),
 1409   1347            "Flag should have been handled and cleared prior to this point");
 1410   1348   
 1411   1349     // What else?  We might try synchronous finalization later.  If the total
 1412   1350     // space available is large enough for the allocation, then a more
 1413   1351     // complete compaction phase than we've tried so far might be
 1414   1352     // appropriate.
 1415           -   assert(*succeeded, "sanity");
 1416   1353     return NULL;
 1417   1354   }
 1418   1355   
 1419   1356   // Attempting to expand the heap sufficiently
 1420   1357   // to support an allocation of the given "word_size".  If
 2145   2082     }
 2146   2083   
 2147   2084     // This notify_all() will ensure that a thread that called
 2148   2085     // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
 2149   2086     // and it's waiting for a full GC to finish will be woken up. It is
 2150           -   // waiting in VM_G1IncCollectionPause::doit_epilogue().
        2087   +   // waiting in VM_G1CollectForAllocation::doit_epilogue().
 2151   2088     FullGCCount_lock->notify_all();
 2152   2089   }
 2153   2090   
 2154   2091   void G1CollectedHeap::collect(GCCause::Cause cause) {
 2155   2092     assert_heap_not_locked();
 2173   2110   
 2174   2111       if (should_do_concurrent_full_gc(cause)) {
 2175   2112         // Schedule an initial-mark evacuation pause that will start a
 2176   2113         // concurrent cycle. We're setting word_size to 0 which means that
 2177   2114         // we are not requesting a post-GC allocation.
 2178           -       VM_G1IncCollectionPause op(gc_count_before,
 2179           -                                  0,     /* word_size */
 2180           -                                  true,  /* should_initiate_conc_mark */
 2181           -                                  g1_policy()->max_pause_time_ms(),
 2182           -                                  cause);
 2183           -       op.set_allocation_context(AllocationContext::current());
 2184           - 
        2115   +       VM_G1CollectForAllocation op(0,     /* word_size */
        2116   +                                    gc_count_before,
        2117   +                                    cause,
        2118   +                                    true,  /* should_initiate_conc_mark */
        2119   +                                    g1_policy()->max_pause_time_ms(),
        2120   +                                    AllocationContext::current());
 2185   2121         VMThread::execute(&op);
 2186   2122         if (!op.pause_succeeded()) {
 2187   2123           if (old_marking_count_before == _old_marking_cycles_started) {
 2188   2124             retry_gc = op.should_retry_gc();
 2189   2125           } else {
 2202   2138         if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
 2203   2139             DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
 2204   2140   
 2205   2141           // Schedule a standard evacuation pause. We're setting word_size
 2206   2142           // to 0 which means that we are not requesting a post-GC allocation.
 2207           -         VM_G1IncCollectionPause op(gc_count_before,
 2208           -                                    0,     /* word_size */
 2209           -                                    false, /* should_initiate_conc_mark */
 2210           -                                    g1_policy()->max_pause_time_ms(),
 2211           -                                    cause);
        2143   +         VM_G1CollectForAllocation op(0,     /* word_size */
        2144   +                                      gc_count_before,
        2145   +                                      cause,
        2146   +                                      false, /* should_initiate_conc_mark */
        2147   +                                      g1_policy()->max_pause_time_ms(),
        2148   +                                      AllocationContext::current());
 2212   2149           VMThread::execute(&op);
 2213   2150         } else {
 2214   2151           // Schedule a Full GC.
 2215   2152           VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
 2216   2153           VMThread::execute(&op);
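
The pause-scheduling sites above now construct a single VM_G1CollectForAllocation and pass the allocation context as a constructor argument rather than through op.set_allocation_context(), so an operation can no longer be executed with that field left unset. A small standalone sketch of the same "require it in the constructor" move (plain C++; PauseOperation and its members are invented for illustration, and the argument order merely mirrors the call sites in the hunks above):

    #include <cstddef>
    #include <cstdio>

    namespace sketch {

    enum class Cause { inc_collection_pause, humongous_allocation, system_gc };
    using AllocationContext = int;  // stand-in for AllocationContext_t

    class PauseOperation {
     public:
      // Everything the pause needs is supplied up front; there is no setter a
      // caller could forget to invoke before executing the operation.
      PauseOperation(std::size_t word_size,
                     unsigned gc_count_before,
                     Cause cause,
                     bool should_initiate_conc_mark,
                     double max_pause_time_ms,
                     AllocationContext context)
          : _word_size(word_size),
            _gc_count_before(gc_count_before),
            _cause(cause),
            _should_initiate_conc_mark(should_initiate_conc_mark),
            _max_pause_time_ms(max_pause_time_ms),
            _context(context) {}

      void execute() const {
        std::printf("pause: %zu words, gc count %u, cause %d, conc mark %d, target %.1f ms, context %d\n",
                    _word_size, _gc_count_before, (int)_cause,
                    (int)_should_initiate_conc_mark, _max_pause_time_ms, _context);
      }

     private:
      std::size_t _word_size;
      unsigned _gc_count_before;
      Cause _cause;
      bool _should_initiate_conc_mark;
      double _max_pause_time_ms;
      AllocationContext _context;
    };

    }  // namespace sketch

    int main() {
      // Mirrors the new call shape: word_size first, then GC count, cause,
      // initial-mark flag, pause target and allocation context.
      sketch::PauseOperation op(0, 42, sketch::Cause::system_gc,
                                true, 200.0, sketch::AllocationContext(0));
      op.execute();
      return 0;
    }
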
 2617   2554   HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
 2618   2555                                                  uint gc_count_before,
 2619   2556                                                  bool* succeeded,
 2620   2557                                                  GCCause::Cause gc_cause) {
 2621   2558     assert_heap_not_locked_and_not_at_safepoint();
 2622           -   VM_G1IncCollectionPause op(gc_count_before,
 2623           -                              word_size,
 2624           -                              false, /* should_initiate_conc_mark */
 2625           -                              g1_policy()->max_pause_time_ms(),
 2626           -                              gc_cause);
 2627           - 
 2628           -   op.set_allocation_context(AllocationContext::current());
        2559   +   VM_G1CollectForAllocation op(word_size,
        2560   +                                gc_count_before,
        2561   +                                gc_cause,
        2562   +                                false, /* should_initiate_conc_mark */
        2563   +                                g1_policy()->max_pause_time_ms(),
        2564   +                                AllocationContext::current());
 2629   2565     VMThread::execute(&op);
 2630   2566   
 2631   2567     HeapWord* result = op.result();
 2632   2568     bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
 2633   2569     assert(result == NULL || ret_succeeded,