src/hotspot/share/gc/g1/heapRegion.hpp
changeset 58980 47c20fc6a517
parent 57802 854e828d6b5b
child 59060 fce1fa1bdc91
comparing 58979:1edb08142cea with 58980:47c20fc6a517
 #include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/heapRegionTracer.hpp"
 #include "gc/g1/heapRegionType.hpp"
 #include "gc/g1/survRateGroup.hpp"
 #include "gc/shared/ageTable.hpp"
-#include "gc/shared/cardTable.hpp"
+#include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/verifyOption.hpp"
-#include "gc/shared/spaceDecorator.hpp"
+#include "runtime/mutex.hpp"
 #include "utilities/macros.hpp"
+
+class G1CollectedHeap;
+class G1CMBitMap;
+class HeapRegionRemSet;
+class HeapRegion;
+class HeapRegionSetBase;
+class nmethod;
+
+#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
+#define HR_FORMAT_PARAMS(_hr_) \
+                (_hr_)->hrm_index(), \
+                (_hr_)->get_short_type_str(), \
+                p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
+
+// sentinel value for hrm_index
+#define G1_NO_HRM_INDEX ((uint) -1)
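
Illustrative usage, not part of this changeset: HR_FORMAT and HR_FORMAT_PARAMS are meant to be used as a matching format-string/argument pair, for example in a Unified Logging call (the log site below is hypothetical):

  // prints something like "7:(E)[0x00000000f1000000,0x00000000f1012345,0x00000000f1100000]"
  log_debug(gc, region)("retiring " HR_FORMAT, HR_FORMAT_PARAMS(hr));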
 
 // A HeapRegion is the smallest piece of a G1CollectedHeap that
 // can be collected independently.
-
-// NOTE: Although a HeapRegion is a Space, its
-// Space::initDirtyCardClosure method must not be called.
-// The problem is that the existence of this method breaks
-// the independence of barrier sets from remembered sets.
-// The solution is to remove this method from the definition
-// of a Space.
 
 // Each heap region is self contained. top() and end() can never
 // be set beyond the end of the region. For humongous objects,
 // the first region is a StartsHumongous region. If the humongous
 // object is larger than a heap region, the following regions will
 // be of type ContinuesHumongous. In this case the top() of the
 // StartsHumongous region and all ContinuesHumongous regions except
 // the last will point to their own end. The last ContinuesHumongous
 // region may have top() equal the end of the object if there isn't
 // room for filler objects to pad out to the end of the region.
-
-class G1CollectedHeap;
-class G1CMBitMap;
-class G1IsAliveAndApplyClosure;
-class HeapRegionRemSet;
-class HeapRegion;
-class HeapRegionSetBase;
-class nmethod;
-
-#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
-#define HR_FORMAT_PARAMS(_hr_) \
-                (_hr_)->hrm_index(), \
-                (_hr_)->get_short_type_str(), \
-                p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
-
-// sentinel value for hrm_index
-#define G1_NO_HRM_INDEX ((uint) -1)
-
-// The complicating factor is that BlockOffsetTable diverged
-// significantly, and we need functionality that is only in the G1 version.
-// So I copied that code, which led to an alternate G1 version of
-// OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
-// be reconciled, then G1OffsetTableContigSpace could go away.
-
-// The idea behind time stamps is the following. We want to keep track of
-// the highest address where it's safe to scan objects for each region.
-// This is only relevant for current GC alloc regions so we keep a time stamp
-// per region to determine if the region has been allocated during the current
-// GC or not. If the time stamp is current we report a scan_top value which
-// was saved at the end of the previous GC for retained alloc regions and which is
-// equal to the bottom for all other regions.
-// There is a race between card scanners and allocating gc workers where we must ensure
-// that card scanners do not read the memory allocated by the gc workers.
-// In order to enforce that, we must not return a value of _top which is more recent than the
-// time stamp. This is due to the fact that a region may become a gc alloc region at
-// some point after we've read the timestamp value as being < the current time stamp.
-// The time stamps are re-initialized to zero at cleanup and at Full GCs.
-// The current scheme that uses sequential unsigned ints will fail only if we have 4b
-// evacuation pauses between two cleanups, which is _highly_ unlikely.
-class G1ContiguousSpace: public CompactibleSpace {
+class HeapRegion : public CHeapObj<mtGC> {
   friend class VMStructs;
+
+  HeapWord* _bottom;
+  HeapWord* _end;
+
   HeapWord* volatile _top;
- protected:
+  HeapWord* _compaction_top;
+
   G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that
   // no more allocations can take place in it. However, sometimes we
   // want to know where the end of the last "real" object we allocated
   // into the region was, and this is what this field keeps track of.
   HeapWord* _pre_dummy_top;
 
- public:
-  G1ContiguousSpace(G1BlockOffsetTable* bot);
+public:
+  void set_bottom(HeapWord* value) { _bottom = value; }
+  HeapWord* bottom() const         { return _bottom; }
+
+  void set_end(HeapWord* value)    { _end = value; }
+  HeapWord* end() const            { return _end;    }
+
+  void set_compaction_top(HeapWord* compaction_top) { _compaction_top = compaction_top; }
+  HeapWord* compaction_top() const { return _compaction_top; }
 
   void set_top(HeapWord* value) { _top = value; }
   HeapWord* top() const { return _top; }
 
- protected:
-  // Reset the G1ContiguousSpace.
-  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
-
-  HeapWord* volatile* top_addr() { return &_top; }
-  // Try to allocate at least min_word_size and up to desired_size from this Space.
+  // Returns true iff this heap region contains the given address as part
+  // of an allocated object. This check is potentially imprecise, so we
+  // restrict its use to assertion checks only.
+  bool is_in(const void* p) const {
+    return is_in_reserved(p);
+  }
+  bool is_in(oop obj) const {
+    return is_in((void*)obj);
+  }
+  // Returns true iff the reserved memory of this region contains the
+  // given address.
+  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
+
+  size_t capacity()     const { return byte_size(bottom(), end()); }
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+
+  bool is_empty() const { return used() == 0; }
+
+private:
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  // Try to allocate at least min_word_size and up to desired_size from this region.
   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
   // space allocated.
-  // This version assumes that all allocation requests to this Space are properly
+  // This version assumes that all allocation requests to this HeapRegion are properly
   // synchronized.
   inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
-  // Try to allocate at least min_word_size and up to desired_size from this Space.
+  // Try to allocate at least min_word_size and up to desired_size from this HeapRegion.
   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
   // space allocated.
   // This version synchronizes with other calls to par_allocate_impl().
   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 
- public:
-  void reset_after_compaction() { set_top(compaction_top()); }
-
-  size_t used() const { return byte_size(bottom(), top()); }
-  size_t free() const { return byte_size(top(), end()); }
-  bool is_free_block(const HeapWord* p) const { return p >= top(); }
-
-  MemRegion used_region() const { return MemRegion(bottom(), top()); }
-
+  void mangle_unused_area() PRODUCT_RETURN;
+
+public:
   void object_iterate(ObjectClosure* blk);
-  void safe_object_iterate(ObjectClosure* blk);
-
-  void mangle_unused_area() PRODUCT_RETURN;
-  void mangle_unused_area_complete() PRODUCT_RETURN;
 
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
     _pre_dummy_top = pre_dummy_top;
   }
+
   HeapWord* pre_dummy_top() {
     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
   }
   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
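
Illustrative sketch, not part of this changeset, of how _pre_dummy_top is meant to be used when retiring an allocation region (the real retirement logic lives in the allocation-region code and may differ; min_fill_size() and fill_with_object() are existing CollectedHeap helpers):

  HeapWord* cur_top = hr->top();
  size_t remaining = pointer_delta(hr->end(), cur_top);       // words left in 'hr'
  if (remaining >= CollectedHeap::min_fill_size()) {
    hr->set_pre_dummy_top(cur_top);          // remember where the last "real" object ended
    hr->allocate(remaining);                 // dummy allocation blocks further allocations
    CollectedHeap::fill_with_object(cur_top, remaining);      // keep the tail parseable
  }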
 
-  virtual void clear(bool mangle_space);
+  void clear(bool mangle_space);
 
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
   // Allocation (return NULL if full).  Assumes the caller has established
-  // mutually exclusive access to the space.
+  // mutually exclusive access to the HeapRegion.
   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
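
Hedged usage sketch for the sized allocation entry points above (variable names are illustrative; in the VM these calls are made from the G1 allocation code):

  size_t actual_words = 0;
  // accept anything between min_words and desired_words
  HeapWord* mem = hr->par_allocate(min_words, desired_words, &actual_words);
  if (mem != NULL) {
    assert(actual_words >= min_words && actual_words <= desired_words, "postcondition");
    // 'mem' points to 'actual_words' words freshly allocated at the old top() of 'hr'
  }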
 
-  virtual HeapWord* allocate(size_t word_size);
-  virtual HeapWord* par_allocate(size_t word_size);
+  HeapWord* allocate(size_t word_size);
+  HeapWord* par_allocate(size_t word_size);
 
   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 
   // MarkSweep support phase3
-  virtual HeapWord* initialize_threshold();
-  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
-
-  virtual void print() const;
+  HeapWord* initialize_threshold();
+  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 
   void reset_bot() {
     _bot_part.reset_bot();
   }
 
   void print_bot_on(outputStream* out) {
     _bot_part.print_on(out);
   }
-};
-
-class HeapRegion: public G1ContiguousSpace {
-  friend class VMStructs;
-  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
- private:
-
+
+private:
   // The remembered set for this region.
-  // (Might want to make this "inline" later, to avoid some alloc failure
-  // issues.)
   HeapRegionRemSet* _rem_set;
-
-  // Auxiliary functions for scan_and_forward support.
-  // See comments for CompactibleSpace for more information.
-  inline HeapWord* scan_limit() const {
-    return top();
-  }
-
-  inline bool scanned_block_is_obj(const HeapWord* addr) const {
-    return true; // Always true, since scan_limit is top
-  }
-
-  inline size_t scanned_block_size(const HeapWord* addr) const {
-    return HeapRegion::block_size(addr); // Avoid virtual call
-  }
-
 
   void report_region_type_change(G1HeapRegionTraceType::Type to);
 
   // Returns whether the given object address refers to a dead object, and either the
   // size of the object (if live) or the size of the block (if dead) in size.
   // May
   // - only called with obj < top()
   // - not called on humongous objects or archive regions
   inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
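
Illustrative only: a walk over the parseable part of a region using is_obj_dead_with_size(), written as if inside a hypothetical HeapRegion member function (the helper is private); 'prev_bitmap' is assumed to be the "prev" G1CMBitMap*:

  HeapWord* cur = bottom();
  while (cur < top()) {
    oop obj = oop(cur);
    size_t size = 0;
    if (is_obj_dead_with_size(obj, prev_bitmap, &size)) {
      // dead: 'size' covers the whole dead block, just skip it
    } else {
      // live: 'size' is the object's size; process 'obj' here
    }
    cur += size;
  }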
 
- protected:
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
 
   HeapRegionType _type;
 
[... unchanged lines omitted ...]
   // have been allocated in this part since the last mark phase.
   // "prev" is the top at the start of the last completed marking.
   // "next" is the top at the start of the in-progress marking (if any.)
   HeapWord* _prev_top_at_mark_start;
   HeapWord* _next_top_at_mark_start;
-  // If a collection pause is in progress, this is the top at the start
-  // of that pause.
 
   void init_top_at_mark_start() {
     assert(_prev_marked_bytes == 0 &&
            _next_marked_bytes == 0,
            "Must be called after zero_marked_bytes.");
[... unchanged lines omitted ...]
                                                      G1CollectedHeap* g1h);
 
   // Returns the block size of the given (dead, potentially having its class unloaded) object
   // starting at p extending to at most the prev TAMS using the given mark bitmap.
   inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMap* const prev_bitmap) const;
- public:
-  HeapRegion(uint hrm_index,
-             G1BlockOffsetTable* bot,
-             MemRegion mr);
+public:
+  HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr);
 
   // Initializing the HeapRegion not only resets the data structure, but also
   // resets the BOT for that heap region.
   // The default values for clear_space mean that we will do the clearing if
   // there's clearing to be done ourselves. We also always mangle the space.
-  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+  void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
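
A hedged sketch of constructing and initializing a region; in the VM this is done by the heap-region management code rather than by hand, and 'index', 'bot' and 'mr' are assumed to exist:

  HeapRegion* hr = new HeapRegion(index, bot, mr);   // C-heap allocated (CHeapObj<mtGC>)
  hr->initialize(mr);   // resets the region and its BOT part; defaults are
                        // clear_space = false, mangle_space = SpaceDecorator::Mangle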
 
   static int    LogOfHRGrainBytes;
   static int    LogOfHRGrainWords;
   static int    LogCardsPerRegion;
 
[... unchanged lines omitted ...]
 
   // Scans through the region using the bitmap to determine what
   // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
   template<typename ApplyToMarkedClosure>
   inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
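
Illustrative only: apply_to_marked_objects() accepts any closure type with a size_t apply(oop) member; the counting closure below is hypothetical, and the returned size is presumably used to step to the next marked object:

  class CountLiveWordsClosure {
    size_t _live_words;
  public:
    CountLiveWordsClosure() : _live_words(0) {}
    size_t apply(oop obj) {
      size_t words = (size_t)obj->size();   // object size in words
      _live_words += words;
      return words;
    }
    size_t live_words() const { return _live_words; }
  };
  // usage: CountLiveWordsClosure cl; hr->apply_to_marked_objects(bitmap, &cl);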
-  // Override for scan_and_forward support.
-  void prepare_for_compaction(CompactPoint* cp);
   // Update heap region to be consistent after compaction.
   void complete_compaction();
 
   inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
[... unchanged lines omitted ...]
   // Currently there is only one place where this is called with
   // vo == UseFullMarking, which is to verify the marking during a
   // full GC.
   void verify(VerifyOption vo, bool *failures) const;
 
-  // Override; it uses the "prev" marking information
-  virtual void verify() const;
+  // Verify using the "prev" marking information
+  void verify() const;
 
   void verify_rem_set(VerifyOption vo, bool *failures) const;
   void verify_rem_set() const;
 };
 
[... unchanged lines omitted ...]
   friend class G1CollectionSetCandidates;
 
   bool _is_complete;
   void set_incomplete() { _is_complete = false; }
 
- public:
+public:
   HeapRegionClosure(): _is_complete(true) {}
 
   // Typically called on each region until it returns true.
   virtual bool do_heap_region(HeapRegion* r) = 0;
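
A hedged illustration of the do_heap_region() contract (the subclass is hypothetical; region iteration code passes each region to the closure and stops early once it returns true):

  class CountFreeRegionsClosure : public HeapRegionClosure {
    uint _free;
  public:
    CountFreeRegionsClosure() : _free(0) {}
    bool do_heap_region(HeapRegion* r) {
      if (r->is_free()) {
        _free++;
      }
      return false;   // never abort: visit every region
    }
    uint free_regions() const { return _free; }
  };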
 