hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
changeset 8928 e5c53268bef5
parent 8927 461fa7ee5254
child 8930 52368505ee8e
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Mar 29 22:36:16 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Mar 30 10:26:59 2011 -0400
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@@ -128,6 +129,15 @@
   void          print();
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
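The two virtuals above are the hooks through which G1AllocRegion asks for a fresh region and retires a full one. Given the new_mutator_alloc_region() / retire_mutator_alloc_region() callbacks declared further down in this change, a minimal sketch of the .cpp side might look like the following (the _g1h back-pointer to the G1CollectedHeap is an assumption of the sketch, not shown in this hunk):

    HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                        bool force) {
      // Delegate to the heap, which decides whether another young region
      // may be handed out for mutator allocation.
      return _g1h->new_mutator_alloc_region(word_size, force);
    }

    void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                           size_t allocated_bytes) {
      // Hand the filled-up region back to the heap for accounting.
      _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
    }
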
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
@@ -135,6 +145,7 @@
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
+  friend class MutatorAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
@@ -197,12 +208,15 @@
   // The sequence of all heap regions in the heap.
   HeapRegionSeq* _hrs;
 
-  // The region from which normal-sized objects are currently being
-  // allocated.  May be NULL.
-  HeapRegion* _cur_alloc_region;
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
 
-  // Postcondition: cur_alloc_region == NULL.
-  void abandon_cur_alloc_region();
+  // It resets the mutator alloc region before new allocations can take place.
+  void init_mutator_alloc_region();
+
+  // It releases the mutator alloc region.
+  void release_mutator_alloc_region();
+
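A plausible shape for these two, assuming G1AllocRegion exposes init(), release() and get(); the definitions are not part of this hunk, this is only a sketch of how the new _mutator_alloc_region field would be driven around a collection:

    void G1CollectedHeap::init_mutator_alloc_region() {
      assert(_mutator_alloc_region.get() == NULL, "pre-condition");
      _mutator_alloc_region.init();
    }

    void G1CollectedHeap::release_mutator_alloc_region() {
      _mutator_alloc_region.release();
      assert(_mutator_alloc_region.get() == NULL, "post-condition");
    }
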
   void abandon_gc_alloc_regions();
 
   // The to-space memory regions into which objects are being copied during
@@ -360,27 +374,21 @@
   G1CollectorPolicy* _g1_policy;
 
   // This is the second level of trying to allocate a new region. If
-  // new_region_work didn't find a region in the free_list, this call
-  // will check whether there's anything available in the
-  // secondary_free_list and/or wait for more regions to appear in that
-  // list, if _free_regions_coming is set.
+  // new_region() didn't find a region on the free_list, this call will
+  // check whether there's anything available on the
+  // secondary_free_list and/or wait for more regions to appear on
+  // that list, if _free_regions_coming is set.
   HeapRegion* new_region_try_secondary_free_list();
 
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
   // request.
-  HeapRegion* new_region_work(size_t word_size, bool do_expand);
+  HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by a
-  // mutator thread. Attempt to expand the heap if no region is
+  // Try to allocate a new region to be used for allocation by
+  // a GC thread. It will try to expand the heap if no region is
   // available.
-  HeapRegion* new_alloc_region(size_t word_size) {
-    return new_region_work(word_size, false /* do_expand */);
-  }
-
-  // Try to allocate a new region to be used for allocation by a GC
-  // thread. Attempt to expand the heap if no region is available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
 
   // Attempt to satisfy a humongous allocation request of the given
@@ -415,10 +423,6 @@
   // * All non-TLAB allocation requests should go to mem_allocate()
   //   and mem_allocate() should never be called with is_tlab == true.
   //
-  // * If the GC locker is active we currently stall until we can
-  //   allocate a new young region. This will be changed in the
-  //   near future (see CR 6994056).
-  //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
   //   this fails, they will attempt to do an evacuation pause and
@@ -441,122 +445,38 @@
                                  bool   is_tlab, /* expected to be false */
                                  bool*  gc_overhead_limit_was_exceeded);
 
-  // The following methods, allocate_from_cur_allocation_region(),
-  // attempt_allocation(), attempt_allocation_locked(),
-  // replace_cur_alloc_region_and_allocate(),
-  // attempt_allocation_slow(), and attempt_allocation_humongous()
-  // have very awkward pre- and post-conditions with respect to
-  // locking:
-  //
-  // If they are called outside a safepoint they assume the caller
-  // holds the Heap_lock when it calls them. However, on exit they
-  // will release the Heap_lock if they return a non-NULL result, but
-  // keep holding the Heap_lock if they return a NULL result. The
-  // reason for this is that we need to dirty the cards that span
-  // allocated blocks on young regions to avoid having to take the
-  // slow path of the write barrier (for performance reasons we don't
-  // update RSets for references whose source is a young region, so we
-  // don't need to look at dirty cards on young regions). But, doing
-  // this card dirtying while holding the Heap_lock can be a
-  // scalability bottleneck, especially given that some allocation
-  // requests might be of non-trivial size (and the larger the region
-  // size is, the fewer allocation requests will be considered
-  // humongous, as the humongous size limit is a fraction of the
-  // region size). So, when one of these calls succeeds in allocating
-  // a block it does the card dirtying after it releases the Heap_lock
-  // which is why it will return without holding it.
-  //
-  // The above asymmetry is the reason why locking / unlocking is done
-  // explicitly (i.e., with Heap_lock->lock() and
-  // Heap_lock->unlock()) instead of using MutexLocker and
-  // MutexUnlocker objects. The latter would ensure that the lock is
-  // unlocked / re-locked at every possible exit out of the basic
-  // block. However, we only want that action to happen in selected
-  // places.
-  //
-  // Further, if the above methods are called during a safepoint, then
-  // naturally there's no assumption about the Heap_lock being held,
-  // nor is there any attempt to unlock it. The parameter at_safepoint
-  // indicates whether the call is made during a safepoint or not (as
-  // an optimization, to avoid reading the global flag with
-  // SafepointSynchronize::is_at_safepoint()).
-  //
-  // The methods share these parameters:
-  //
-  // * word_size     : the size of the allocation request in words
-  // * at_safepoint  : whether the call is done at a safepoint; this
-  //                   also determines whether a GC is permitted
-  //                   (at_safepoint == false) or not (at_safepoint == true)
-  // * do_dirtying   : whether the method should dirty the allocated
-  //                   block before returning
-  //
-  // They all return either the address of the block, if they
-  // successfully manage to allocate it, or NULL.
+  // The following three methods take a gc_count_before_ret
+  // parameter which is used to return the GC count if the method
+  // returns NULL. Given that we are required to read the GC count
+  // while holding the Heap_lock, and these paths will take the
+  // Heap_lock at some point, it's easier to get them to read the GC
+  // count while holding the Heap_lock before they return NULL instead
+  // of the caller (namely: mem_allocate()) having to also take the
+  // Heap_lock just to read the GC count.
+
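To make that protocol concrete, here is a heavily simplified sketch of the caller side; the retry loop is an illustration only, and the exact VM_G1CollectForAllocation constructor arguments are assumed rather than taken from this change:

    HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
                                            bool   is_noref,
                                            bool   is_tlab,
                                            bool*  gc_overhead_limit_was_exceeded) {
      // Retry until the request is satisfied or a pause fails to help.
      for (;;) {
        unsigned int gc_count_before;  // filled in only on the NULL path
        HeapWord* result = isHumongous(word_size)
          ? attempt_allocation_humongous(word_size, &gc_count_before)
          : attempt_allocation(word_size, &gc_count_before);
        if (result != NULL) {
          return result;
        }
        // The slow path read gc_count_before while it held the Heap_lock,
        // so the collection request can be stamped with a consistent count
        // without mem_allocate() re-taking the lock here.
        VM_G1CollectForAllocation op(gc_count_before, word_size);
        VMThread::execute(&op);
        if (op.prologue_succeeded()) {
          return op.result();          // may still be NULL
        }
        // The GC count moved before the pause could start (another
        // thread's pause intervened), so simply retry the allocation.
      }
    }
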
+  // First-level mutator allocation attempt: try to allocate out of
+  // the mutator alloc region without taking the Heap_lock. This
+  // should only be used for non-humongous allocations.
+  inline HeapWord* attempt_allocation(size_t word_size,
+                                      unsigned int* gc_count_before_ret);
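As a rough illustration of this lock-free first level (a sketch, not the actual g1CollectedHeap.inline.hpp body; it assumes G1AllocRegion offers an attempt_allocation(word_size, bot_updates) fast path, and the dirty_young_block() name is taken from the card-dirtying comment further down):

    inline HeapWord*
    G1CollectedHeap::attempt_allocation(size_t word_size,
                                        unsigned int* gc_count_before_ret) {
      assert(!isHumongous(word_size), "only for non-humongous requests");
      // CAS-bump inside the current mutator alloc region, no Heap_lock.
      HeapWord* result =
        _mutator_alloc_region.attempt_allocation(word_size,
                                                 false /* bot_updates */);
      if (result == NULL) {
        // Fast path failed: fall back to the Heap_lock-taking second level.
        result = attempt_allocation_slow(word_size, gc_count_before_ret);
      }
      if (result != NULL) {
        // Dirty the cards spanning the new block so the post write barrier
        // stays on its fast path for this young region.
        dirty_young_block(result, word_size);
      }
      return result;
    }
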
 
-  // It tries to satisfy an allocation request out of the current
-  // alloc region, which is passed as a parameter. It assumes that the
-  // caller has checked that the current alloc region is not NULL.
-  // Given that the caller has to check the current alloc region for
-  // at least NULL, it might as well pass it as the first parameter so
-  // that the method doesn't have to read it from the
-  // _cur_alloc_region field again. It is called from both
-  // attempt_allocation() and attempt_allocation_locked() and the
-  // with_heap_lock parameter indicates whether the caller was holding
-  // the heap lock when it called it or not.
-  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                  size_t word_size,
-                                                  bool with_heap_lock);
-
-  // First-level of allocation slow path: it attempts to allocate out
-  // of the current alloc region in a lock-free manner using a CAS. If
-  // that fails it takes the Heap_lock and calls
-  // attempt_allocation_locked() for the second-level slow path.
-  inline HeapWord* attempt_allocation(size_t word_size);
-
-  // Second-level of allocation slow path: while holding the Heap_lock
-  // it tries to allocate out of the current alloc region and, if that
-  // fails, tries to allocate out of a new current alloc region.
-  inline HeapWord* attempt_allocation_locked(size_t word_size);
+  // Second-level mutator allocation attempt: take the Heap_lock and
+  // retry the allocation attempt, potentially scheduling a GC
+  // pause. This should only be used for non-humongous allocations.
+  HeapWord* attempt_allocation_slow(size_t word_size,
+                                    unsigned int* gc_count_before_ret);
 
-  // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs the
-  // allocation out of the new current alloc region and updates
-  // _cur_alloc_region. Normally, it would try to allocate a new
-  // region if the young gen is not full, unless can_expand is true in
-  // which case it would always try to allocate a new region.
-  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
-                                                  bool at_safepoint,
-                                                  bool do_dirtying,
-                                                  bool can_expand);
-
-  // Third-level of allocation slow path: when we are unable to
-  // allocate a new current alloc region to satisfy an allocation
-  // request (i.e., when attempt_allocation_locked() fails). It will
-  // try to do an evacuation pause, which might stall due to the GC
-  // locker, and retry the allocation attempt when appropriate.
-  HeapWord* attempt_allocation_slow(size_t word_size);
+  // Takes the Heap_lock and attempts a humongous allocation. It can
+  // potentially schedule a GC pause.
+  HeapWord* attempt_allocation_humongous(size_t word_size,
+                                         unsigned int* gc_count_before_ret);
 
-  // The method that tries to satisfy a humongous allocation
-  // request. If it cannot satisfy it, it will try to do an evacuation
-  // pause to perhaps reclaim enough space to be able to satisfy the
-  // allocation request afterwards.
-  HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         bool at_safepoint);
-
-  // It does the common work when we are retiring the current alloc region.
-  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
-
-  // It retires the current alloc region, which is passed as a
-  // parameter (since, typically, the caller is already holding on to
-  // it). It sets _cur_alloc_region to NULL.
-  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
-
-  // It attempts to do an allocation immediately before or after an
-  // evacuation pause and can only be called by the VM thread. It has
-  // slightly different assumptions than the ones before (i.e.,
-  // assumes that the current alloc region has been retired).
+  // Allocation attempt that should be called during safepoints (e.g.,
+  // at the end of a successful GC). expect_null_mutator_alloc_region
+  // specifies whether the mutator alloc region is expected to be NULL
+  // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                            bool expect_null_cur_alloc_region);
+                                       bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that the post
   // write barrier never queues anything when updating objects on this
@@ -583,6 +503,12 @@
   // GC pause.
   void  retire_alloc_region(HeapRegion* alloc_region, bool par);
 
+  // These two methods are the "callbacks" from the G1AllocRegion class.
+
+  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
+  void retire_mutator_alloc_region(HeapRegion* alloc_region,
+                                   size_t allocated_bytes);
+
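Heavily simplified, the heap-side pair might look as follows; the policy query and the bookkeeping shown are assumptions of this sketch, not part of the hunk:

    HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                          bool force) {
      // Hand out a new young region unless the young gen is already full;
      // 'force' overrides that check.
      if (force || !g1_policy()->is_young_list_full()) {
        HeapRegion* new_alloc_region = new_region(word_size,
                                                  false /* do_expand */);
        if (new_alloc_region != NULL) {
          set_region_short_lived_locked(new_alloc_region);
          return new_alloc_region;
        }
      }
      return NULL;
    }

    void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                      size_t allocated_bytes) {
      // Record what the mutators actually allocated in this region; the
      // young-list / collection-set bookkeeping is elided in this sketch.
      _summary_bytes_used += allocated_bytes;
    }
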
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -1027,6 +953,9 @@
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
+  void verify_dirty_young_regions() PRODUCT_RETURN;
+
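PRODUCT_RETURN keeps these verification hooks out of product builds. As a reminder of how that works, the macro from utilities/macros.hpp expands roughly like this, so product builds get empty inline bodies while non-product builds get plain declarations (with the real definitions living in the .cpp under #ifndef PRODUCT):

    #ifdef PRODUCT
    #define PRODUCT_RETURN  {}
    #else
    #define PRODUCT_RETURN  /* next token must be ; */
    #endif
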
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).