hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp
changeset 33786 ac8da6513351
parent 33105 294e48b4f704
child 35461 1068dcb8d315
33785:f5e6ef11d24b 33786:ac8da6513351
@@ -113,10 +113,15 @@
 }
 
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  if (!this->is_in(p)) {
+    assert(is_continues_humongous(), "This case can only happen for humongous regions");
+    return (p == humongous_start_region()->bottom());
+  }
   if (ClassUnloadingWithConcurrentMark) {
     return !g1h->is_obj_dead(oop(p), this);
   }
   return p < top();
 }
@@ -174,14 +179,10 @@
 
 inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
-
-  assert(_prev_marked_bytes <=
-         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
-         HeapWordSize, "invariant");
 }
 
 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
   if (is_survivor()) {
     // This is how we always allocate survivors.