hotspot/src/share/vm/gc/g1/heapRegion.hpp
changeset 32389 626f27450e12
parent 32185 49a57ff2c3cb
child 33105 294e48b4f704
@@ -107,11 +107,11 @@
 // The time stamps are re-initialized to zero at cleanup and at Full GCs.
 // The current scheme that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
-  HeapWord* _top;
+  HeapWord* volatile _top;
   HeapWord* volatile _scan_top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
   volatile unsigned _gc_time_stamp;
@@ -132,14 +132,22 @@
 
  protected:
   // Reset the G1OffsetTableContigSpace.
   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
-  HeapWord** top_addr() { return &_top; }
-  // Allocation helpers (return NULL if full).
-  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
-  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+  HeapWord* volatile* top_addr() { return &_top; }
+  // Try to allocate at least min_word_size and up to desired_size from this Space.
+  // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
+  // space allocated.
+  // This version assumes that all allocation requests to this Space are properly
+  // synchronized.
+  inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
+  // Try to allocate at least min_word_size and up to desired_size from this Space.
+  // Returns NULL if not possible, otherwise sets actual_word_size to the amount of
+  // space allocated.
+  // This version synchronizes with other calls to par_allocate_impl().
+  inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
 
  public:
   void reset_after_compaction() { set_top(compaction_top()); }
 
   size_t used() const { return byte_size(bottom(), top()); }
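For orientation, here is a minimal, self-contained sketch of the min/desired/actual contract that the new allocate_impl()/par_allocate_impl() declarations describe: try to hand out desired_word_size words, fall back to whatever is left as long as it still covers min_word_size, otherwise return NULL and leave the space untouched. The sketch uses a toy bump-pointer space built on std::atomic rather than HotSpot's HeapWord/Atomic types; ToySpace and every name in it are illustrative, not the changeset's actual implementation (which lives in heapRegion.inline.hpp).

#include <atomic>
#include <cstddef>

// Toy stand-in for a contiguous space; illustrative only, not HotSpot code.
struct ToySpace {
  char*              _bottom;
  char*              _end;
  std::atomic<char*> _top;

  // Pick an allocation size: desired if it fits, else whatever is left as long
  // as it still covers the minimum, else 0 (meaning "cannot allocate").
  size_t fit(char* top, size_t min_bytes, size_t desired_bytes) const {
    size_t available = static_cast<size_t>(_end - top);
    if (desired_bytes <= available) return desired_bytes;
    if (min_bytes <= available)     return available;
    return 0;
  }

  // Serial variant: the caller guarantees exclusive access (cf. allocate_impl()).
  void* allocate_impl(size_t min_bytes, size_t desired_bytes, size_t* actual_bytes) {
    char* old_top = _top.load(std::memory_order_relaxed);
    size_t size = fit(old_top, min_bytes, desired_bytes);
    if (size == 0) return nullptr;            // not even min_bytes fits
    _top.store(old_top + size, std::memory_order_relaxed);
    *actual_bytes = size;
    return old_top;
  }

  // Parallel variant: synchronizes with other par_allocate_impl() callers by
  // claiming [old_top, old_top + size) with a CAS loop on _top
  // (cf. par_allocate_impl()).
  void* par_allocate_impl(size_t min_bytes, size_t desired_bytes, size_t* actual_bytes) {
    char* old_top = _top.load(std::memory_order_relaxed);
    for (;;) {
      size_t size = fit(old_top, min_bytes, desired_bytes);
      if (size == 0) return nullptr;
      // On failure compare_exchange_weak reloads old_top, so we simply retry.
      if (_top.compare_exchange_weak(old_top, old_top + size)) {
        *actual_bytes = size;
        return old_top;
      }
    }
  }
};

The point of the two-size interface is that a caller asking for a large buffer can consume the tail of a nearly full space instead of failing outright, as long as the tail still satisfies its minimum.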
@@ -177,13 +185,18 @@
   virtual void clear(bool mangle_space);
 
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
-  // Add offset table update.
+  // Allocation (return NULL if full).  Assumes the caller has established
+  // mutually exclusive access to the space.
+  HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
+  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
+  HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
+
   virtual HeapWord* allocate(size_t word_size);
-  HeapWord* par_allocate(size_t word_size);
+  virtual HeapWord* par_allocate(size_t word_size);
 
   HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
 
   // MarkSweep support phase3
   virtual HeapWord* initialize_threshold();
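The public allocate()/par_allocate() overloads added above expose the same two-size contract to callers: allocate() for callers that already hold exclusive access to the space, par_allocate() for callers that may race with other allocators. A hypothetical caller could look like the fragment below; refill_buffer() and its parameters are made-up names for illustration, not part of this changeset.

// Hypothetical helper, assuming the usual HotSpot environment (HeapWord, NULL)
// and a G1OffsetTableContigSpace* in hand; names and policy are illustrative.
static HeapWord* refill_buffer(G1OffsetTableContigSpace* space,
                               size_t min_word_size,       // must be satisfied now
                               size_t desired_word_size,   // preferred buffer size
                               size_t* actual_word_size) {
  // Several threads may allocate out of the same space, so use the variant
  // that enforces mutual exclusion internally.
  HeapWord* buf = space->par_allocate(min_word_size, desired_word_size, actual_word_size);
  if (buf == NULL) {
    return NULL;  // not even min_word_size words left; try elsewhere
  }
  // *actual_word_size now holds a value in [min_word_size, desired_word_size].
  return buf;
}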
@@ -349,12 +362,13 @@
   size_t block_size(const HeapWord* p) const;
 
   // Override for scan_and_forward support.
   void prepare_for_compaction(CompactPoint* cp);
 
-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
 
   // If this region is a member of a HeapRegionManager, the index in that
   // sequence, otherwise -1.
   uint hrm_index() const { return _hrm_index; }
 
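The _no_bot_updates variants gain the same min/desired/actual shape. As a rough sketch of what the suffix implies, the forwarding below assumes the caller does not need the block offset table (BOT) maintained for the new block, as G1 assumes for young regions, so the helper can go straight to the raw *_impl routine. The bodies are illustrative guesses, not the changeset's actual inline definitions in heapRegion.inline.hpp.

// Illustrative only: a plausible shape for the new inline helpers, assuming
// BOT maintenance may be skipped because the region is young.
inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "BOT updates can only be skipped for young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "BOT updates can only be skipped for young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}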