hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
changeset 7923 fc200fcd4e05
parent 7905 cc7740616b03
child 8072 f223f43cd62f
--- g1CollectedHeap.hpp	7922:e97540c35e38
+++ g1CollectedHeap.hpp	7923:fc200fcd4e05
@@ -25,11 +25,11 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/sharedHeap.hpp"
 
@@ -64,12 +64,11 @@
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
 enum G1GCThreadGroups {
   G1CRGroup = 0,
   G1ZFGroup = 1,
-  G1CMGroup = 2,
-  G1CLGroup = 3
+  G1CMGroup = 2
 };
 
 enum GCAllocPurpose {
   GCAllocForTenured,
   GCAllocForSurvived,
@@ -153,10 +152,11 @@
   friend class G1ParTask;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
   friend class RegionSorter;
+  friend class RegionResetter;
   friend class CountRCClosure;
   friend class EvacPopObjClosure;
   friend class G1ParCleanupCTTask;
 
   // Other related classes.
@@ -176,31 +176,31 @@
   MemRegion _g1_committed;
 
   // The maximum part of _g1_storage that has ever been committed.
   MemRegion _g1_max_committed;
 
-  // The number of regions that are completely free.
-  size_t _free_regions;
+  // The master free list. It will satisfy all new region allocations.
+  MasterFreeRegionList      _free_list;
+
+  // The secondary free list which contains regions that have been
+  // freed up during the cleanup process. This will be appended to the
+  // master free list when appropriate.
+  SecondaryFreeRegionList   _secondary_free_list;
+
+  // It keeps track of the humongous regions.
+  MasterHumongousRegionSet  _humongous_set;
 
   // The number of regions we could create by expansion.
   size_t _expansion_regions;
-
-  // Return the number of free regions in the heap (by direct counting.)
-  size_t count_free_regions();
-  // Return the number of free regions on the free and unclean lists.
-  size_t count_free_regions_list();
 
   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
 
   // Move all of the regions off the free lists, then rebuild those free
   // lists, before and after full GC.
   void tear_down_region_lists();
   void rebuild_region_lists();
-  // This sets all non-empty regions to need zero-fill (which they will if
-  // they are empty after full collection.)
-  void set_used_regions_to_need_zero_fill();
 
   // The sequence of all heap regions in the heap.
   HeapRegionSeq* _hrs;
 
   // The region from which normal-sized objects are currently being
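The hunk above replaces the raw `_free_regions` counter with explicit region sets: a master free list that satisfies all new region allocations, a secondary free list fed by concurrent cleanup, and a set tracking humongous regions. A minimal stand-alone sketch of the intended two-list flow, using `std::list<int>` as a stand-in for the real `HeapRegionSet` types (everything below is illustrative, not the committed HotSpot code):

    #include <list>
    #include <cstdio>

    int main() {
      // Stand-ins: integers play the role of HeapRegion*.
      std::list<int> master_free_list;     // MasterFreeRegionList  _free_list
      std::list<int> secondary_free_list;  // SecondaryFreeRegionList _secondary_free_list

      // Concurrent cleanup frees regions onto the secondary list first...
      secondary_free_list.push_back(7);
      secondary_free_list.push_back(9);

      // ...and they are appended to the master list "when appropriate"
      // (cf. append_secondary_free_list() later in this header).
      master_free_list.splice(master_free_list.end(), secondary_free_list);

      // All new region allocations are then satisfied from the master list.
      int region = master_free_list.front();
      master_free_list.pop_front();
      std::printf("allocated region %d, %zu left\n",
                  region, master_free_list.size());
      return 0;
    }

The comments suggest the point of the split: cleanup can publish freed regions without touching the master list on every free, deferring contention to a single batched append.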
@@ -229,11 +229,11 @@
   HeapRegion* _gc_alloc_region_list;
 
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
-  // When called by par thread, require par_alloc_during_gc_lock() to be held.
+  // When called by par thread, requires the FreeList_lock to be held.
   void push_gc_alloc_region(HeapRegion* hr);
 
   // This should only be called single-threaded.  Undeclares all GC alloc
   // regions.
   void forget_alloc_region_list();
@@ -292,25 +292,27 @@
 
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
 #define heap_locking_asserts_err_msg(__extra_message)                         \
-  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                        \
+  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
           (__extra_message),                                                  \
-          (!Heap_lock->owned_by_self()) ? "NOT " : "",                        \
-          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
+          BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
+          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
+          BOOL_TO_STR(Thread::current()->is_VM_thread()))
 
 #define assert_heap_locked()                                                  \
   do {                                                                        \
     assert(Heap_lock->owned_by_self(),                                        \
            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
   } while (0)
 
-#define assert_heap_locked_or_at_safepoint()                                  \
+#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread)             \
   do {                                                                        \
     assert(Heap_lock->owned_by_self() ||                                      \
-                                     SafepointSynchronize::is_at_safepoint(), \
+           (SafepointSynchronize::is_at_safepoint() &&                        \
+             ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                         "should be at a safepoint"));         \
   } while (0)
 
 #define assert_heap_locked_and_not_at_safepoint()                             \
@@ -333,13 +335,14 @@
                                     !SafepointSynchronize::is_at_safepoint(), \
       heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
                                    "should not be at a safepoint"));          \
   } while (0)
 
-#define assert_at_safepoint()                                                 \
+#define assert_at_safepoint(__should_be_vm_thread)                            \
   do {                                                                        \
-    assert(SafepointSynchronize::is_at_safepoint(),                           \
+    assert(SafepointSynchronize::is_at_safepoint() &&                         \
+              ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
            heap_locking_asserts_err_msg("should be at a safepoint"));         \
   } while (0)
 
 #define assert_not_at_safepoint()                                             \
   do {                                                                        \
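The assert macros above gain two things: the failure message now reports all three relevant conditions via `BOOL_TO_STR` instead of splicing "NOT " fragments, and the safepoint variants take a `__should_be_vm_thread` argument that is compared against `Thread::current()->is_VM_thread()`. A compilable analog of the pattern, with stub predicates standing in for the VM's `Heap_lock`, `SafepointSynchronize`, and `Thread` queries (all stand-ins are hypothetical):

    #include <cstdio>
    #include <cstdlib>

    // Stub predicates standing in for the VM queries used by the real macros.
    static bool heap_lock_owned_by_self() { return false; }
    static bool is_at_safepoint()         { return true;  }
    static bool is_vm_thread()            { return true;  }

    #define BOOL_TO_STR(b) ((b) ? "true" : "false")

    // Analog of heap_locking_asserts_err_msg: report all three conditions.
    #define locking_err_msg(extra)                                             \
      std::fprintf(stderr, "%s : Heap_lock locked: %s, at safepoint: %s, "     \
                   "is VM thread: %s\n", (extra),                              \
                   BOOL_TO_STR(heap_lock_owned_by_self()),                     \
                   BOOL_TO_STR(is_at_safepoint()),                             \
                   BOOL_TO_STR(is_vm_thread()))

    // Analog of assert_at_safepoint(__should_be_vm_thread): the caller states
    // which thread kind it expects; the check compares expectation vs. fact.
    #define assert_at_safepoint(should_be_vm_thread)                           \
      do {                                                                     \
        if (!(is_at_safepoint() &&                                             \
              (should_be_vm_thread) == is_vm_thread())) {                      \
          locking_err_msg("should be at a safepoint");                         \
          std::abort();                                                        \
        }                                                                      \
      } while (0)  /* do/while(0) keeps the macro a single statement */

    int main() {
      assert_at_safepoint(true /* should_be_vm_thread */); // passes with stubs
      return 0;
    }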
@@ -360,35 +363,45 @@
   YoungList*  _young_list;
 
   // The current policy object for the collector.
   G1CollectorPolicy* _g1_policy;
 
-  // Parallel allocation lock to protect the current allocation region.
-  Mutex  _par_alloc_during_gc_lock;
-  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
-
-  // If possible/desirable, allocate a new HeapRegion for normal object
-  // allocation sufficient for an allocation of the given "word_size".
-  // If "do_expand" is true, will attempt to expand the heap if necessary
-  // to to satisfy the request.  If "zero_filled" is true, requires a
-  // zero-filled region.
-  // (Returning NULL will trigger a GC.)
-  virtual HeapRegion* newAllocRegion_work(size_t word_size,
-                                          bool do_expand,
-                                          bool zero_filled);
-
-  virtual HeapRegion* newAllocRegion(size_t word_size,
-                                     bool zero_filled = true) {
-    return newAllocRegion_work(word_size, false, zero_filled);
-  }
-  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
-                                                  size_t word_size,
-                                                  bool zero_filled = true);
+  // This is the second level of trying to allocate a new region. If
+  // new_region_work didn't find a region in the free_list, this call
+  // will check whether there's anything available in the
+  // secondary_free_list and/or wait for more regions to appear in that
+  // list, if _free_regions_coming is set.
+  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
+
+  // It will try to allocate a single non-humongous HeapRegion
+  // sufficient for an allocation of the given word_size.  If
+  // do_expand is true, it will attempt to expand the heap if
+  // necessary to satisfy the allocation request. Note that word_size
+  // is only used to make sure that we expand sufficiently but, given
+  // that the allocation request is assumed not to be humongous,
+  // having word_size is not strictly necessary (expanding by a single
+  // region will always be sufficient). But let's keep that parameter
+  // in case we need it in the future.
+  HeapRegion* new_region_work(size_t word_size, bool do_expand);
+
+  // It will try to allocate a new region to be used for allocation by
+  // mutator threads. It will not try to expand the heap if no region
+  // is available.
+  HeapRegion* new_alloc_region(size_t word_size) {
+    return new_region_work(word_size, false /* do_expand */);
+  }
+
+  // It will try to allocate a new region to be used for allocation by
+  // a GC thread. It will try to expand the heap if no region is
+  // available.
+  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
+
+  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
 
   // Attempt to allocate an object of the given (very large) "word_size".
   // Returns "NULL" on failure.
-  virtual HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size);
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
   // into the G1's allocation routines. They have the following
   // assumptions:
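The comments above describe a layered allocation path: `new_region_work` tries the master free list first, `new_region_try_secondary_free_list` is the second level, and `do_expand` decides whether the heap may grow (false for `new_alloc_region`, the mutator path, while the GC path may expand). A self-contained sketch of that fallback order (stand-in types and helpers, not the committed implementation):

    #include <list>
    #include <cstdio>

    static std::list<int> free_list;            // master free list stand-in
    static std::list<int> secondary_free_list;  // secondary free list stand-in

    static int take_head(std::list<int>& l) {   // helper: pop head, or -1 if empty
      if (l.empty()) return -1;
      int r = l.front(); l.pop_front(); return r;
    }

    // Level 2: fall back to regions freed by concurrent cleanup.
    static int new_region_try_secondary_free_list() {
      return take_head(secondary_free_list);
    }

    // Level 1, then level 2, then optional expansion, as in the comments above.
    static int new_region_work(bool do_expand) {
      int r = take_head(free_list);
      if (r < 0) r = new_region_try_secondary_free_list();
      if (r < 0 && do_expand) {
        free_list.push_back(42);   // "expand": commit one fresh region
        r = take_head(free_list);
      }
      return r;
    }

    int main() {
      secondary_free_list.push_back(3);
      std::printf("%d\n", new_region_work(false)); // 3: from the secondary list
      std::printf("%d\n", new_region_work(false)); // -1: empty, mutators don't expand
      std::printf("%d\n", new_region_work(true));  // 42: the GC path may expand
      return 0;
    }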
@@ -758,35 +771,41 @@
                              OopClosure* non_root_closure);
 
   // Invoke "save_marks" on all heap regions.
   void save_marks();
 
-  // Free a heap region.
-  void free_region(HeapRegion* hr);
-  // A component of "free_region", exposed for 'batching'.
-  // All the params after "hr" are out params: the used bytes of the freed
-  // region(s), the number of H regions cleared, the number of regions
-  // freed, and pointers to the head and tail of a list of freed contig
-  // regions, linked throught the "next_on_unclean_list" field.
-  void free_region_work(HeapRegion* hr,
-                        size_t& pre_used,
-                        size_t& cleared_h,
-                        size_t& freed_regions,
-                        UncleanRegionList* list,
-                        bool par = false);
-
+  // It frees a non-humongous region by initializing its contents and
+  // adding it to the free list that's passed as a parameter (this is
+  // usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
+  void free_region(HeapRegion* hr,
+                   size_t* pre_used,
+                   FreeRegionList* free_list,
+                   bool par);
+
+  // It frees a humongous region by collapsing it into individual
+  // regions and calling free_region() for each of them. The freed
+  // regions will be added to the free list that's passed as a parameter
+  // (this is usually a local list which will be appended to the
+  // master free list later). The used bytes of freed regions are
+  // accumulated in pre_used. If par is true, the region's RSet will
+  // not be freed up. The assumption is that this will be done later.
+  void free_humongous_region(HeapRegion* hr,
+                             size_t* pre_used,
+                             FreeRegionList* free_list,
+                             HumongousRegionSet* humongous_proxy_set,
+                             bool par);
 
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
   bool _mark_in_progress;
 
   // The concurrent refiner.
   ConcurrentG1Refine* _cg1r;
-
-  // The concurrent zero-fill thread.
-  ConcurrentZFThread* _czft;
 
   // The parallel task queues
   RefToScanQueueSet *_task_queues;
 
   // True iff a evacuation has failed in the current collection.
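Per the new comments, `free_region` and `free_humongous_region` follow a batching protocol: freed regions go onto a caller-supplied local list and their used bytes accumulate in `pre_used`; the master lists and the global used-bytes total are updated later in one step. A stand-alone sketch of that protocol (stand-in types; the real code also defers RSet freeing when `par` is true):

    #include <list>
    #include <cstddef>
    #include <cstdio>

    struct Region { int id; std::size_t used_bytes; };

    static std::list<Region> master_free_list;   // stand-in for _free_list
    static std::size_t total_used = 1000;        // stand-in for the heap's used bytes

    // Analog of free_region(): put the region on a *local* list and add its
    // used bytes to pre_used; do not touch the master list yet.
    static void free_region(Region r, std::size_t* pre_used,
                            std::list<Region>* local) {
      *pre_used += r.used_bytes;
      local->push_back(r);
    }

    // Analog of update_sets_after_freeing_regions(): one batched append plus
    // one adjustment of the used-bytes total (under a lock in the real code).
    static void update_sets_after_freeing_regions(std::size_t pre_used,
                                                  std::list<Region>* local) {
      master_free_list.splice(master_free_list.end(), *local);
      total_used -= pre_used;
    }

    int main() {
      std::list<Region> local;   // per-worker list, no synchronization needed
      std::size_t pre_used = 0;
      free_region(Region{1, 100}, &pre_used, &local);
      free_region(Region{2, 250}, &pre_used, &local);
      update_sets_after_freeing_regions(pre_used, &local);
      std::printf("used=%zu, free regions=%zu\n",
                  total_used, master_free_list.size()); // used=650, free regions=2
      return 0;
    }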
@@ -875,13 +894,11 @@
     G1H_PS_NumElements
   };
 
   SubTasksDone* _process_strong_tasks;
 
-  // List of regions which require zero filling.
-  UncleanRegionList _unclean_region_list;
-  bool _unclean_regions_coming;
+  volatile bool _free_regions_coming;
 
 public:
 
   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 
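`_free_regions_coming` is the handshake between concurrent cleanup and allocators: cleanup sets it while it is still publishing freed regions, and `wait_while_free_regions_coming` (declared later in this diff) lets a caller block until the flag is reset. A sketch of one plausible shape for that handshake, using a standard mutex and condition variable as stand-ins for the VM's `SecondaryFreeList_lock` machinery (illustrative only):

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <cstdio>

    static std::mutex              lock;      // stand-in for SecondaryFreeList_lock
    static std::condition_variable cv;
    static bool free_regions_coming = false;  // stand-in for _free_regions_coming

    static void set_free_regions_coming() {   // before cleanup starts freeing
      std::lock_guard<std::mutex> g(lock);
      free_regions_coming = true;
    }
    static void reset_free_regions_coming() { // once cleanup is done
      { std::lock_guard<std::mutex> g(lock); free_regions_coming = false; }
      cv.notify_all();
    }
    static void wait_while_free_regions_coming() { // allocator blocks until done
      std::unique_lock<std::mutex> g(lock);
      cv.wait(g, [] { return !free_regions_coming; });
    }

    int main() {
      set_free_regions_coming();
      std::thread cleanup([] { reset_free_regions_coming(); });
      wait_while_free_regions_coming();  // returns once cleanup resets the flag
      cleanup.join();
      std::printf("free regions published\n");
      return 0;
    }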
@@ -1000,75 +1017,68 @@
 
   // The number of regions that are completely free.
   size_t max_regions();
 
   // The number of regions that are completely free.
-  size_t free_regions();
+  size_t free_regions() {
+    return _free_list.length();
+  }
 
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
 
-  // True iff the ZF thread should run.
-  bool should_zf();
-
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
-#ifndef PRODUCT
-  bool regions_accounted_for();
-  bool print_region_accounting_info();
-  void print_region_counts();
-#endif
-
-  HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
-  HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);
-
-  void put_region_on_unclean_list(HeapRegion* r);
-  void put_region_on_unclean_list_locked(HeapRegion* r);
-
-  void prepend_region_list_on_unclean_list(UncleanRegionList* list);
-  void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);
-
-  void set_unclean_regions_coming(bool b);
-  void set_unclean_regions_coming_locked(bool b);
-  // Wait for cleanup to be complete.
-  void wait_for_cleanup_complete();
-  // Like above, but assumes that the calling thread owns the Heap_lock.
-  void wait_for_cleanup_complete_locked();
-
-  // Return the head of the unclean list.
-  HeapRegion* peek_unclean_region_list_locked();
-  // Remove and return the head of the unclean list.
-  HeapRegion* pop_unclean_region_list_locked();
-
-  // List of regions which are zero filled and ready for allocation.
-  HeapRegion* _free_region_list;
-  // Number of elements on the free list.
-  size_t _free_region_list_size;
-
-  // If the head of the unclean list is ZeroFilled, move it to the free
-  // list.
-  bool move_cleaned_region_to_free_list_locked();
-  bool move_cleaned_region_to_free_list();
-
-  void put_free_region_on_list_locked(HeapRegion* r);
-  void put_free_region_on_list(HeapRegion* r);
-
-  // Remove and return the head element of the free list.
-  HeapRegion* pop_free_region_list_locked();
-
-  // If "zero_filled" is true, we first try the free list, then we try the
-  // unclean list, zero-filling the result.  If "zero_filled" is false, we
-  // first try the unclean list, then the zero-filled list.
-  HeapRegion* alloc_free_region_from_lists(bool zero_filled);
-
-  // Verify the integrity of the region lists.
-  void remove_allocated_regions_from_lists();
-  bool verify_region_lists();
-  bool verify_region_lists_locked();
-  size_t unclean_region_list_length();
-  size_t free_region_list_length();
+  // verify_region_sets() performs verification over the region
+  // lists. It will be compiled in the product code to be used when
+  // necessary (i.e., during heap verification).
+  void verify_region_sets();
+
+  // verify_region_sets_optional() is planted in the code for
+  // list verification in non-product builds (and it can be enabled in
+  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
+#if HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() {
+    verify_region_sets();
+  }
+#else // HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() { }
+#endif // HEAP_REGION_SET_FORCE_VERIFY
+
+#ifdef ASSERT
+  bool is_on_free_list(HeapRegion* hr) {
+    return hr->containing_set() == &_free_list;
+  }
+
+  bool is_on_humongous_set(HeapRegion* hr) {
+    return hr->containing_set() == &_humongous_set;
+  }
+#endif // ASSERT
+
+  // Wrapper for the region list operations that can be called from
+  // methods outside this class.
+
+  void secondary_free_list_add_as_tail(FreeRegionList* list) {
+    _secondary_free_list.add_as_tail(list);
+  }
+
+  void append_secondary_free_list() {
+    _free_list.add_as_tail(&_secondary_free_list);
+  }
+
+  void append_secondary_free_list_if_not_empty() {
+    if (!_secondary_free_list.is_empty()) {
+      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+      append_secondary_free_list();
+    }
+  }
+
+  void set_free_regions_coming();
+  void reset_free_regions_coming();
+  bool free_regions_coming() { return _free_regions_coming; }
+  void wait_while_free_regions_coming();
 
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
   virtual void collect(GCCause::Cause cause);
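Two details worth noting in the hunk above: `free_regions()` becomes an O(1) query on `_free_list.length()` instead of a declared counting routine, and `verify_region_sets_optional()` compiles to an empty body unless verification is requested, with `HEAP_REGION_SET_FORCE_VERIFY` able to force it on even in product builds. A minimal model of that compile-time toggle (`FORCE_VERIFY` here is a made-up stand-in macro):

    #include <cstdio>

    // Compile-time switch, analogous to HEAP_REGION_SET_FORCE_VERIFY: the
    // expensive verification disappears entirely unless forced to 1.
    #ifndef FORCE_VERIFY
    #define FORCE_VERIFY 0
    #endif

    static void verify_region_sets() { std::puts("verifying region sets..."); }

    #if FORCE_VERIFY
    static void verify_region_sets_optional() { verify_region_sets(); }
    #else
    static void verify_region_sets_optional() { /* compiled away */ }
    #endif

    int main() {
      verify_region_sets_optional();  // no-op unless built with -DFORCE_VERIFY=1
      return 0;
    }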
@@ -1083,27 +1093,28 @@
   virtual void collect_as_vm_thread(GCCause::Cause cause);
 
   // True iff a evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
-  // Free a region if it is totally full of garbage.  Returns the number of
-  // bytes freed (0 ==> didn't free it).
-  size_t free_region_if_totally_empty(HeapRegion *hr);
-  void free_region_if_totally_empty_work(HeapRegion *hr,
-                                         size_t& pre_used,
-                                         size_t& cleared_h_regions,
-                                         size_t& freed_regions,
-                                         UncleanRegionList* list,
-                                         bool par = false);
-
-  // If we've done free region work that yields the given changes, update
-  // the relevant global variables.
-  void finish_free_region_work(size_t pre_used,
-                               size_t cleared_h_regions,
-                               size_t freed_regions,
-                               UncleanRegionList* list);
-
+  // It will free a region if it has allocated objects in it that are
+  // all dead. It calls either free_region() or
+  // free_humongous_region() depending on the type of the region that
+  // is passed to it.
+  void free_region_if_totally_empty(HeapRegion* hr,
+                                    size_t* pre_used,
+                                    FreeRegionList* free_list,
+                                    HumongousRegionSet* humongous_proxy_set,
+                                    bool par);
+
+  // It appends the free list to the master free list and updates the
+  // master humongous list according to the contents of the proxy
+  // list. It also adjusts the total used bytes according to pre_used
+  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
+  void update_sets_after_freeing_regions(size_t pre_used,
+                                         FreeRegionList* free_list,
+                                         HumongousRegionSet* humongous_proxy_set,
+                                         bool par);
 
   // Returns "TRUE" iff "p" points into the allocated area of the heap.
   virtual bool is_in(const void* p) const;
 
   // Return "TRUE" iff the given object address is within the collection
@@ -1312,12 +1323,10 @@
     // At least until perm gen collection is also G1-ified, at
     // which point this should return false.
     return true;
   }
 
-  virtual bool allocs_are_zero_filled();
-
   // The boundary between a "large" and "small" array of primitives, in
   // words.
   virtual size_t large_typearray_limit();
 
   // Returns "true" iff the given word_size is "very large".
@@ -1544,17 +1553,10 @@
 
   // </NEW PREDICTION>
 
 protected:
   size_t _max_heap_capacity;
-
-public:
-  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
-  // MemoryService).  In productization, we can make this assert false
-  // to catch such places (as well as searching for calls to this...)
-  static void g1_unimplemented();
-
 };
 
 #define use_local_bitmaps         1
 #define verify_local_bitmaps      0
 #define oop_buffer_length       256