src/hotspot/share/gc/g1/heapRegion.hpp
changeset 59218 a1155217a563
parent 59060 fce1fa1bdc91
child 59220 72e15d757e6c
   HeapWord* compaction_top() const { return _compaction_top; }
 
   void set_top(HeapWord* value) { _top = value; }
   HeapWord* top() const { return _top; }
 
+  // See the comment above in the declaration of _pre_dummy_top for an
+  // explanation of what it is.
+  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
+    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
+    _pre_dummy_top = pre_dummy_top;
+  }
+  HeapWord* pre_dummy_top() { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
+  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
+
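The block above moves the pre-dummy-top accessors up next to top(). For context: _pre_dummy_top (declared, with its full comment, outside this hunk) supports G1's trick of retiring a region by filling its unused tail with a dummy object while remembering where the last real object ended. A minimal sketch of that retirement pattern, with hypothetical names (HeapRegionLike and fill_with_dummy_object are stand-ins, not HotSpot API):

    // Sketch only; assumes a bump-pointer region with top() and end().
    void retire_region_sketch(HeapRegionLike* r) {
      size_t tail_words = r->end() - r->top();  // unused words at the end
      if (tail_words > 0) {
        r->set_pre_dummy_top(r->top());         // remember end of last real object
        r->fill_with_dummy_object(tail_words);  // dummy fill bumps top to end()
      }
    }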
   // Returns true iff the given heap region contains the
   // given address as part of an allocated object. This may
   // be a potentially quite expensive check, so we restrict its
   // use to assertion checks only.
   bool is_in(const void* p) const {
     return is_in_reserved(p);
...
 
   bool is_empty() const { return used() == 0; }
 
 private:
   void reset_after_compaction() { set_top(compaction_top()); }
+
+  void clear(bool mangle_space);
+
+  HeapWord* block_start_const(const void* p) const;
+
+  void mangle_unused_area() PRODUCT_RETURN;
 
   // Try to allocate at least min_word_size and up to desired_word_size from this region.
   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
   // space allocated.
   // This version assumes that all allocation requests to this HeapRegion are properly
...
   // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
   // space allocated.
   // This version synchronizes with other calls to par_allocate_impl().
   inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 
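The min/desired/actual contract deserves a concrete illustration. A single-threaded sketch, assuming a bump pointer between top and end (illustrative only, not the body from heapRegion.inline.hpp):

    HeapWord* allocate_sketch(HeapWord*& top, HeapWord* end,
                              size_t min_word_size, size_t desired_word_size,
                              size_t* actual_word_size) {
      size_t available = (size_t)(end - top);          // free words left
      size_t want = MIN2(desired_word_size, available);
      if (want < min_word_size) {
        return NULL;                                   // cannot satisfy the minimum
      }
      HeapWord* obj = top;
      top += want;                                     // bump-pointer allocation
      *actual_word_size = want;
      return obj;
    }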
-  void mangle_unused_area() PRODUCT_RETURN;
-
 public:
+  HeapWord* block_start(const void* p);
+
   void object_iterate(ObjectClosure* blk);
-
-  // See the comment above in the declaration of _pre_dummy_top for an
-  // explanation of what it is.
-  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
-    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
-    _pre_dummy_top = pre_dummy_top;
-  }
-
-  HeapWord* pre_dummy_top() {
-    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
-  }
-  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
-
-  void clear(bool mangle_space);
-
-  HeapWord* block_start(const void* p);
-  HeapWord* block_start_const(const void* p) const;
 
   // Allocation (return NULL if full).  Assumes the caller has established
   // mutually exclusive access to the HeapRegion.
   HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 
   HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
 
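par_allocate() differs from allocate() only in synchronization: the bump of top() must be atomic. A hedged sketch of the usual compare-and-swap retry loop (the real par_allocate_impl() lives in heapRegion.inline.hpp; this is an analogue, not that code):

    HeapWord* par_allocate_sketch(HeapWord* volatile* top_addr, HeapWord* end,
                                  size_t min_word_size, size_t desired_word_size,
                                  size_t* actual_word_size) {
      while (true) {
        HeapWord* obj = *top_addr;
        size_t available = (size_t)(end - obj);
        size_t want = MIN2(desired_word_size, available);
        if (want < min_word_size) {
          return NULL;
        }
        // Publish the new top; if another thread won the race, retry.
        if (Atomic::cmpxchg(top_addr, obj, obj + want) == obj) {
          *actual_word_size = want;
          return obj;
        }
      }
    }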
-  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
-
-  // MarkSweep support phase3
+  inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
+
+  // Full GC support methods.
+
   HeapWord* initialize_threshold();
   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
+  // Update heap region to be consistent after Full GC compaction.
+  void reset_humongous_during_compaction() {
+    assert(is_humongous(),
+           "should only be called for humongous regions");
+
+    zero_marked_bytes();
+    init_top_at_mark_start();
+  }
+  // Update heap region to be consistent after Full GC compaction.
+  void complete_compaction();
+
+  // All allocated blocks are occupied by objects in a HeapRegion
+  bool block_is_obj(const HeapWord* p) const;
+
+  // Returns whether the given object is dead based on TAMS and bitmap.
+  bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;
+
+  // Returns the object size for all valid block starts,
+  // and the number of unallocated words if called on top().
+  size_t block_size(const HeapWord* p) const;
+
+  // Scans through the region using the bitmap to determine what
+  // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
+  template<typename ApplyToMarkedClosure>
+  inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
 
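For readers new to apply_to_marked_objects(): the walk alternates between the bitmap and the closure, which reports each object's size so the scan can skip ahead. A sketch of that shape (the bitmap method name is assumed from MarkBitMap; the real implementation is in heapRegion.inline.hpp):

    template<typename ApplyToMarkedClosure>
    void apply_to_marked_objects_sketch(G1CMBitMap* bitmap, HeapWord* bottom,
                                        HeapWord* limit,
                                        ApplyToMarkedClosure* closure) {
      HeapWord* addr = bitmap->get_next_marked_addr(bottom, limit);
      while (addr < limit) {
        size_t size = closure->apply(oop(addr));  // closure returns the object size
        addr = bitmap->get_next_marked_addr(addr + size, limit);
      }
    }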
   void reset_bot() {
     _bot_part.reset_bot();
-  }
-
-  void print_bot_on(outputStream* out) {
-    _bot_part.print_on(out);
   }
 
 private:
   // The remembered set for this region.
   HeapRegionRemSet* _rem_set;
 
-  void report_region_type_change(G1HeapRegionTraceType::Type to);
-
-  // Returns whether the given object address refers to a dead object, and either the
-  // size of the object (if live) or the size of the block (if dead) in size.
-  // May
-  // - only be called with obj < top()
-  // - not be called on humongous objects or archive regions
-  inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
-
-  // The index of this region in the heap region sequence.
+  // Cached index of this region in the heap region sequence.
   uint  _hrm_index;
 
   HeapRegionType _type;
 
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
 
   // True iff an attempt to evacuate an object in the region failed.
   bool _evacuation_failed;
+
+  static const uint InvalidCSetIndex = UINT_MAX;
+
+  // The index in the optional regions array, if this region
+  // is considered optional during a mixed collection.
+  uint _index_in_opt_cset;
 
   // Fields used by the HeapRegionSetBase class and subclasses.
   HeapRegion* _next;
   HeapRegion* _prev;
 #ifdef ASSERT
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
-
-  // We use concurrent marking to determine the amount of live data
-  // in each heap region.
-  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
-  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
-
-  // The calculated GC efficiency of the region.
-  double _gc_efficiency;
-
-  static const uint InvalidCSetIndex = UINT_MAX;
-
-  // The index in the optional regions array, if this region
-  // is considered optional during a mixed collection.
-  uint _index_in_opt_cset;
-
-  // Data for young region survivor prediction.
-  uint  _young_index_in_cset;
-  SurvRateGroup* _surv_rate_group;
-  int  _age_index;
 
   // The start of the unmarked area. The unmarked area extends from this
   // word until the top and/or end of the region, and is the part
   // of the region for which no marking was done, i.e. objects may
   // have been allocated in this part since the last mark phase.
   // "prev" is the top at the start of the last completed marking.
   // "next" is the top at the start of the in-progress marking (if any).
   HeapWord* _prev_top_at_mark_start;
   HeapWord* _next_top_at_mark_start;
 
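TAMS plus the marking bitmap yields G1's liveness predicate: objects at or above TAMS were allocated since marking started and are implicitly live; below TAMS, only marked objects are live. A sketch of the idea (the real check is is_obj_dead(), declared above in this diff):

    bool is_obj_dead_sketch(HeapWord* obj_addr, HeapWord* prev_tams,
                            const G1CMBitMap* prev_bitmap) {
      if (obj_addr >= prev_tams) {
        return false;                             // allocated since last marking: live
      }
      return !prev_bitmap->is_marked(obj_addr);   // below TAMS: live only if marked
    }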
       
+  // We use concurrent marking to determine the amount of live data
+  // in each heap region.
+  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
+  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
+
   void init_top_at_mark_start() {
     assert(_prev_marked_bytes == 0 &&
            _next_marked_bytes == 0,
            "Must be called after zero_marked_bytes.");
-    HeapWord* bot = bottom();
-    _prev_top_at_mark_start = bot;
-    _next_top_at_mark_start = bot;
-  }
+    _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
+  }
+
+  // Data for young region survivor prediction.
+  uint  _young_index_in_cset;
+  SurvRateGroup* _surv_rate_group;
+  int  _age_index;
 
   // Cached attributes used in the collection set policy information
 
-  // The RSet length that was added to the total value
+  // The calculated GC efficiency of the region.
+  double _gc_efficiency;
+
+  // The remembered set length that was added to the total value
   // for the collection set.
   size_t _recorded_rs_length;
 
   // The predicted elapsed time that was added to the total value
   // for the collection set.
   double _predicted_elapsed_time_ms;
 
   uint _node_index;
+
+  void report_region_type_change(G1HeapRegionTraceType::Type to);
+
+  // Returns whether the given object address refers to a dead object, and either the
+  // size of the object (if live) or the size of the block (if dead) in size.
+  // May
+  // - only be called with obj < top()
+  // - not be called on humongous objects or archive regions
+  inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
 
   // Iterate over the references covered by the given MemRegion in a humongous
   // object and apply the given closure to them.
   // Humongous objects are allocated directly in the old-gen. So we need special
   // handling for concurrent processing encountering an in-progress allocation.
...
   // starting at p extending to at most the prev TAMS using the given mark bitmap.
   inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMap* const prev_bitmap) const;
 public:
   HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr);
 
+  // If this region is a member of a HeapRegionManager, the index in that
+  // sequence, otherwise -1.
+  uint hrm_index() const { return _hrm_index; }
+
   // Initializing the HeapRegion not only resets the data structure, but also
   // resets the BOT for that heap region.
   // The default value for clear_space means that we will do the clearing
   // ourselves if there's clearing to be done. We also always mangle the space.
   void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
...
 
   static size_t align_up_to_region_byte_size(size_t sz) {
     return (sz + (size_t) GrainBytes - 1) &
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
-
 
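The alignment above is the standard power-of-two round-up; it relies on GrainBytes == (size_t)1 << LogOfHRGrainBytes, so both lines effectively mask with the same value. A self-contained worked example, assuming 1 MB regions:

    const size_t kLogGrain = 20;                  // assumed region size: 1 MB
    const size_t kGrain = (size_t)1 << kLogGrain;

    size_t align_up_sketch(size_t sz) {
      // e.g. align_up_sketch(1) == 1 MB, align_up_sketch(kGrain) == kGrain
      return (sz + kGrain - 1) & ~(kGrain - 1);
    }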
   // Returns whether a field is in the same region as the obj it points to.
   template <typename T>
   static bool is_in_same_region(T* p, oop obj) {
     assert(p != NULL, "p can't be NULL");
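The body of is_in_same_region() is elided from this hunk, but the check amounts to comparing the address bits above the region-size shift. A standalone analogue:

    bool in_same_region_sketch(const void* p, const void* q) {
      // Same region iff the high-order bits above LogOfHRGrainBytes agree.
      return (((uintptr_t)p ^ (uintptr_t)q) >> LogOfHRGrainBytes) == 0;
    }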
...
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
-
-  // All allocated blocks are occupied by objects in a HeapRegion
-  bool block_is_obj(const HeapWord* p) const;
-
-  // Returns whether the given object is dead based on TAMS and bitmap.
-  bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;
-
-  // Returns the object size for all valid block starts,
-  // and the number of unallocated words if called on top().
-  size_t block_size(const HeapWord* p) const;
-
-  // Scans through the region using the bitmap to determine what
-  // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
-  template<typename ApplyToMarkedClosure>
-  inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
-  // Update heap region to be consistent after compaction.
-  void complete_compaction();
-
-  inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
-  inline HeapWord* allocate_no_bot_updates(size_t word_size);
-  inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
-
-  // If this region is a member of a HeapRegionManager, the index in that
-  // sequence, otherwise -1.
-  uint hrm_index() const { return _hrm_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }
   size_t live_bytes() {
     return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
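live_bytes() counts everything between prev TAMS and top() as live wholesale, and adds the bytes found live by the last marking below prev TAMS. For example, with an 8-byte HeapWord, a region whose top() is 1000 words past prev TAMS and whose last marking recorded 4096 live bytes reports 1000 * 8 + 4096 = 12288 live bytes.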
...
   }
 
   void zero_marked_bytes()      {
     _prev_marked_bytes = _next_marked_bytes = 0;
   }
+  // Get the start of the unmarked area in this region.
+  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
+  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
+
+  // Note the start or end of marking. This tells the heap region
+  // that the collector is about to start or has finished (concurrently)
+  // marking the heap.
+
+  // Notify the region that concurrent marking is starting. Initialize
+  // all fields related to the next marking info.
+  inline void note_start_of_marking();
+
+  // Notify the region that concurrent marking has finished. Copy the
+  // (now finalized) next marking info fields into the prev marking
+  // info fields.
+  inline void note_end_of_marking();
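What these notifications presumably do to the fields above, judging from the comments (a sketch; the bodies live in heapRegion.inline.hpp, and private members are accessed directly here for brevity):

    void note_start_of_marking_sketch(HeapRegion* r) {
      // Snapshot top(): objects above it are allocated during marking,
      // hence implicitly live for this cycle.
      r->_next_marked_bytes = 0;
      r->_next_top_at_mark_start = r->top();
    }

    void note_end_of_marking_sketch(HeapRegion* r) {
      // Promote the now-finalized "next" info to "prev".
      r->_prev_top_at_mark_start = r->_next_top_at_mark_start;
      r->_prev_marked_bytes = r->_next_marked_bytes;
      r->_next_marked_bytes = 0;
    }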
 
   const char* get_type_str() const { return _type.get_str(); }
   const char* get_short_type_str() const { return _type.get_short_str(); }
   G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
 
...
   // should not be marked during mark/sweep. This allows the address
   // space to be shared by JVM instances.
   bool is_archive()        const { return _type.is_archive(); }
   bool is_open_archive()   const { return _type.is_open_archive(); }
   bool is_closed_archive() const { return _type.is_closed_archive(); }
+
+  void set_free();
+
+  void set_eden();
+  void set_eden_pre_gc();
+  void set_survivor();
+
+  void move_to_old();
+  void set_old();
+
+  void set_open_archive();
+  void set_closed_archive();
 
   // For a humongous region, region in which it starts.
   HeapRegion* humongous_start_region() const {
     return _humongous_start_region;
   }
...
 
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next and prev fields used to link regions into
   // linked lists.
+  void set_next(HeapRegion* next) { _next = next; }
   HeapRegion* next()              { return _next; }
+
+  void set_prev(HeapRegion* prev) { _prev = prev; }
   HeapRegion* prev()              { return _prev; }
-
-  void set_next(HeapRegion* next) { _next = next; }
-  void set_prev(HeapRegion* prev) { _prev = prev; }
 
   // Every region added to a set is tagged with a reference to that
   // set. This is used for doing consistency checking to make sure that
   // the contents of a set are as they should be and it's only
   // available in non-product builds.
...
   // If locked is true, assume we are the only thread doing this operation.
   void hr_clear(bool skip_remset, bool clear_space, bool locked = false);
   // Clear the card table corresponding to this region.
   void clear_cardtable();
 
-  // Get the start of the unmarked area in this region.
-  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
-  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
-
-  // Note the start or end of marking. This tells the heap region
-  // that the collector is about to start or has finished (concurrently)
-  // marking the heap.
-
-  // Notify the region that concurrent marking is starting. Initialize
-  // all fields related to the next marking info.
-  inline void note_start_of_marking();
-
-  // Notify the region that concurrent marking has finished. Copy the
-  // (now finalized) next marking info fields into the prev marking
-  // info fields.
-  inline void note_end_of_marking();
+  // Returns the "evacuation_failed" property of the region.
+  bool evacuation_failed() { return _evacuation_failed; }
+
+  // Sets the "evacuation_failed" property of the region.
+  void set_evacuation_failed(bool b) {
+    _evacuation_failed = b;
+
+    if (b) {
+      _next_marked_bytes = 0;
+    }
+  }
 
   // Notify the region that we are about to start processing
   // self-forwarded objects during evac failure handling.
   void note_self_forwarding_removal_start(bool during_initial_mark,
                                           bool during_conc_mark);
 
   // Notify the region that we have finished processing self-forwarded
   // objects during evac failure handling.
   void note_self_forwarding_removal_end(size_t marked_bytes);
 
-  void reset_during_compaction() {
-    assert(is_humongous(),
-           "should only be called for humongous regions");
-
-    zero_marked_bytes();
-    init_top_at_mark_start();
-  }
-
-  void calc_gc_efficiency(void);
-  double gc_efficiency() const { return _gc_efficiency; }
-
   uint index_in_opt_cset() const {
     assert(has_index_in_opt_cset(), "Opt cset index not set.");
     return _index_in_opt_cset;
   }
   bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
   void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
   void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
+
+  void calc_gc_efficiency(void);
+  double gc_efficiency() const { return _gc_efficiency; }
 
   uint  young_index_in_cset() const { return _young_index_in_cset; }
   void clear_young_index_in_cset() { _young_index_in_cset = 0; }
   void set_young_index_in_cset(uint index) {
     assert(index != UINT_MAX, "just checking");
...
     } else {
       assert(_age_index == -1, "pre-condition");
     }
   }
 
-  void set_free();
-
-  void set_eden();
-  void set_eden_pre_gc();
-  void set_survivor();
-
-  void move_to_old();
-  void set_old();
-
-  void set_open_archive();
-  void set_closed_archive();
-
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
   // is within the unmarked area of the region.
   bool obj_allocated_since_prev_marking(oop obj) const {
     return (HeapWord *) obj >= prev_top_at_mark_start();
   }
   bool obj_allocated_since_next_marking(oop obj) const {
     return (HeapWord *) obj >= next_top_at_mark_start();
-  }
-
-  // Returns the "evacuation_failed" property of the region.
-  bool evacuation_failed() { return _evacuation_failed; }
-
-  // Sets the "evacuation_failed" property of the region.
-  void set_evacuation_failed(bool b) {
-    _evacuation_failed = b;
-
-    if (b) {
-      _next_marked_bytes = 0;
-    }
-  }
   }
 
   // Iterate over the objects overlapping the given memory region, applying cl
   // to all references in the region.  This is a helper for
   // G1RemSet::refine_card*, and is tightly coupled with them.