8139867: Change how startsHumongous and continuesHumongous regions work in G1.
author: david
date: Mon, 09 Nov 2015 09:19:39 +0100
changeset 33786 ac8da6513351
parent 33785 f5e6ef11d24b
child 33788 07cad4f072b5
child 33789 4a76a42bd42e
8139867: Change how startsHumongous and continuesHumongous regions work in G1. Reviewed-by: tschatzl, tbenson
hotspot/src/share/vm/gc/g1/concurrentMark.cpp
hotspot/src/share/vm/gc/g1/concurrentMark.hpp
hotspot/src/share/vm/gc/g1/concurrentMark.inline.hpp
hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.cpp
hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.hpp
hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp
hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp
hotspot/src/share/vm/gc/g1/g1HRPrinter.cpp
hotspot/src/share/vm/gc/g1/g1HRPrinter.hpp
hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp
hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp
hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp
hotspot/src/share/vm/gc/g1/g1RemSet.inline.hpp
hotspot/src/share/vm/gc/g1/g1StringDedup.cpp
hotspot/src/share/vm/gc/g1/heapRegion.cpp
hotspot/src/share/vm/gc/g1/heapRegion.hpp
hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp
hotspot/src/share/vm/gc/g1/heapRegionManager.cpp
hotspot/src/share/vm/gc/g1/heapRegionManager.hpp
hotspot/src/share/vm/gc/g1/heapRegionManager.inline.hpp
hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp
hotspot/src/share/vm/gc/g1/satbQueue.cpp
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -802,12 +802,8 @@
     // This closure can be called concurrently to the mutator, so we must make sure
     // that the result of the getNextMarkedWordAddress() call is compared to the
     // value passed to it as limit to detect any found bits.
-    // We can use the region's orig_end() for the limit and the comparison value
-    // as it always contains the "real" end of the region that never changes and
-    // has no side effects.
-    // Due to the latter, there can also be no problem with the compiler generating
-    // reloads of the orig_end() call.
-    HeapWord* end = r->orig_end();
+    // A region's end never changes in G1, so it is safe to use as both limit and comparison value.
+    HeapWord* end = r->end();
     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
   }
 };
@@ -821,9 +817,7 @@
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      r->note_start_of_marking();
-    }
+    r->note_start_of_marking();
     return false;
   }
 };
@@ -1282,22 +1276,10 @@
 
   // Takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
-  // bitmap to 1. If the region is "starts humongous" it will also set
-  // to 1 the bits on the region bitmap that correspond to its
-  // associated "continues humongous" regions.
+  // bitmap to 1.
   void set_bit_for_region(HeapRegion* hr) {
-    assert(!hr->is_continues_humongous(), "should have filtered those out");
-
     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
-    if (!hr->is_starts_humongous()) {
-      // Normal (non-humongous) case: just set the bit.
-      _region_bm->par_at_put(index, true);
-    } else {
-      // Starts humongous case: calculate how many regions are part of
-      // this humongous region and then set the bit range.
-      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
-      _region_bm->par_at_put_range(index, end_index, true);
-    }
+    _region_bm->par_at_put(index, true);
   }
 
 public:
@@ -1321,18 +1303,6 @@
     _bm(bm), _region_marked_bytes(0) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* ntams = hr->next_top_at_mark_start();
     HeapWord* start = hr->bottom();
 
@@ -1370,6 +1340,11 @@
       // Add the size of this object to the number of marked bytes.
       marked_bytes += (size_t)obj_sz * HeapWordSize;
 
+      // This will happen if we are handling a humongous object that spans
+      // several heap regions.
+      if (obj_end > hr->end()) {
+        break;
+      }
       // Find the next marked object after this one.
       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
     }
@@ -1442,17 +1417,6 @@
   int failures() const { return _failures; }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     int failures = 0;
 
     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
@@ -1465,10 +1429,25 @@
     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
     size_t act_marked_bytes = hr->next_marked_bytes();
 
-    // We're not OK if expected marked bytes > actual marked bytes. It means
-    // we have missed accounting some objects during the actual marking.
     if (exp_marked_bytes > act_marked_bytes) {
-      failures += 1;
+      if (hr->is_starts_humongous()) {
+        // For starts_humongous regions, exp_marked_bytes holds the size of
+        // the whole humongous object.
+        HeapRegion* region = hr;
+        int num_regions;
+        for (num_regions = 0; region != NULL; num_regions++) {
+          region = _g1h->next_region_in_humongous(region);
+        }
+        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
+          failures += 1;
+        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
+          failures += 1;
+        }
+      } else {
+        // We're not OK if expected marked bytes > actual marked bytes. It means
+        // we have missed accounting some objects during the actual marking.
+        failures += 1;
+      }
     }
 
     // Verify the bit, for this region, in the actual and expected
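The two checks above work together with the scan loop earlier in this file: CalcLiveObjectsClosure adds the whole object's size to marked_bytes before breaking out at the region boundary, so the first region of a humongous object reports the full object's bytes, and exp_marked_bytes is then accepted only if it is consistent with the number of regions in the chain, i.e. (num_regions - 1) * GrainBytes < exp_marked_bytes <= num_regions * GrainBytes. A minimal standalone sketch of that predicate (function name hypothetical):

    #include <cstddef>

    // True iff exp_marked_bytes is plausible for a humongous object spanning
    // exactly num_regions regions of grain_bytes each (num_regions >= 1).
    static bool humongous_marked_bytes_plausible(size_t exp_marked_bytes,
                                                 unsigned num_regions,
                                                 size_t grain_bytes) {
      return (num_regions - 1) * grain_bytes < exp_marked_bytes &&
             exp_marked_bytes <= num_regions * grain_bytes;
    }

For example, with 1 MB regions an object spanning three regions passes only for marked byte counts in (2 MB, 3 MB].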
@@ -1569,18 +1548,6 @@
     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* ntams = hr->next_top_at_mark_start();
     HeapWord* top   = hr->top();
 
@@ -1677,7 +1644,7 @@
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->is_continues_humongous() || hr->is_archive()) {
+    if (hr->is_archive()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@@ -1689,7 +1656,6 @@
       _freed_bytes += hr->used();
       hr->set_containing_set(NULL);
       if (hr->is_humongous()) {
-        assert(hr->is_starts_humongous(), "we should only see starts humongous");
         _humongous_regions_removed.increment(1u, hr->capacity());
         _g1->free_humongous_region(hr, _local_cleanup_list, true);
       } else {
@@ -2338,7 +2304,7 @@
   // circumspect about treating the argument as an object.
   void do_entry(void* entry) const {
     _task->increment_refs_reached();
-    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
+    HeapRegion* hr = _g1h->heap_region_containing(entry);
     if (entry < hr->next_top_at_mark_start()) {
       // Until we get here, we don't know whether entry refers to a valid
       // object; it could instead have been a stale reference.
@@ -2488,32 +2454,9 @@
   while (finger < _heap_end) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
-    // Note on how this code handles humongous regions. In the
-    // normal case the finger will reach the start of a "starts
-    // humongous" (SH) region. Its end will either be the end of the
-    // last "continues humongous" (CH) region in the sequence, or the
-    // standard end of the SH region (if the SH is the only region in
-    // the sequence). That way claim_region() will skip over the CH
-    // regions. However, there is a subtle race between a CM thread
-    // executing this method and a mutator thread doing a humongous
-    // object allocation. The two are not mutually exclusive as the CM
-    // thread does not need to hold the Heap_lock when it gets
-    // here. So there is a chance that claim_region() will come across
-    // a free region that's in the progress of becoming a SH or a CH
-    // region. In the former case, it will either
-    //   a) Miss the update to the region's end, in which case it will
-    //      visit every subsequent CH region, will find their bitmaps
-    //      empty, and do nothing, or
-    //   b) Will observe the update of the region's end (in which case
-    //      it will skip the subsequent CH regions).
-    // If it comes across a region that suddenly becomes CH, the
-    // scenario will be similar to b). So, the race between
-    // claim_region() and a humongous object allocation might force us
-    // to do a bit of unnecessary work (due to some unnecessary bitmap
-    // iterations) but it should not introduce and correctness issues.
-    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
-
-    // Above heap_region_containing_raw may return NULL as we always scan claim
+    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
+
+    // Above heap_region_containing may return NULL as we always scan claim
     // until the end of the heap. In this case, just jump to the next region.
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
@@ -2589,16 +2532,9 @@
   // Verify the global finger
   HeapWord* global_finger = finger();
   if (global_finger != NULL && global_finger < _heap_end) {
-    // The global finger always points to a heap region boundary. We
-    // use heap_region_containing_raw() to get the containing region
-    // given that the global finger could be pointing to a free region
-    // which subsequently becomes continues humongous. If that
-    // happens, heap_region_containing() will return the bottom of the
-    // corresponding starts humongous region and the check below will
-    // not hold any more.
     // Since we always iterate over all regions, we might get a NULL HeapRegion
     // here.
-    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
+    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
@@ -2611,7 +2547,7 @@
     HeapWord* task_finger = task->finger();
     if (task_finger != NULL && task_finger < _heap_end) {
       // See above note on the global finger verification.
-      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
+      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                 !task_hr->in_collection_set(),
                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
@@ -2639,17 +2575,6 @@
     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed.
-      // Note that we cannot rely on their associated
-      // "starts humongous" region to have their bit set to 1
-      // since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* start = hr->bottom();
     HeapWord* limit = hr->next_top_at_mark_start();
     HeapWord* end = hr->end();
@@ -2957,8 +2882,6 @@
 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
-  assert(!hr->is_continues_humongous(),
-        "claim_region() should have filtered out continues humongous regions");
   _curr_region  = hr;
   _finger       = hr->bottom();
   update_region_limit();
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -772,16 +772,13 @@
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);
 
-  // Counts the given memory region in the task/worker counting
-  // data structures for the given worker id.
-  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
-
   // Counts the given object in the given task/worker counting
   // data structures.
   inline void count_object(oop obj,
                            HeapRegion* hr,
                            size_t* marked_bytes_array,
-                           BitMap* task_card_bm);
+                           BitMap* task_card_bm,
+                           size_t word_size);
 
   // Attempts to mark the given object and, if successful, counts
   // the object in the given task/worker counting structures.
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -89,9 +89,7 @@
   size_t region_size_bytes = mr.byte_size();
   uint index = hr->hrm_index();
 
-  assert(!hr->is_continues_humongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
-  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
   assert(marked_bytes_array != NULL, "pre-condition");
   assert(task_card_bm != NULL, "pre-condition");
 
@@ -116,23 +114,23 @@
   set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
 }
 
-// Counts the given memory region in the task/worker counting
-// data structures for the given worker id.
-inline void ConcurrentMark::count_region(MemRegion mr,
-                                         HeapRegion* hr,
-                                         uint worker_id) {
-  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
-  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
-  count_region(mr, hr, marked_bytes_array, task_card_bm);
-}
-
 // Counts the given object in the given task/worker counting data structures.
 inline void ConcurrentMark::count_object(oop obj,
                                          HeapRegion* hr,
                                          size_t* marked_bytes_array,
-                                         BitMap* task_card_bm) {
-  MemRegion mr((HeapWord*)obj, obj->size());
-  count_region(mr, hr, marked_bytes_array, task_card_bm);
+                                         BitMap* task_card_bm,
+                                         size_t word_size) {
+  assert(!hr->is_continues_humongous(), "Cannot enter count_object with continues humongous");
+  if (!hr->is_starts_humongous()) {
+    MemRegion mr((HeapWord*)obj, word_size);
+    count_region(mr, hr, marked_bytes_array, task_card_bm);
+  } else {
+    do {
+      MemRegion mr(hr->bottom(), hr->top());
+      count_region(mr, hr, marked_bytes_array, task_card_bm);
+      hr = _g1h->next_region_in_humongous(hr);
+    } while (hr != NULL);
+  }
 }
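Because every region of a humongous object except the last now has top() == end(), the per-region [bottom, top) spans counted by the loop above sum to exactly the object's size. A hedged sketch of that identity (helper name hypothetical; pointer_delta and the region accessors are existing HotSpot APIs):

    // Sums the spans that count_object accumulates for a humongous object.
    static size_t humongous_counted_words(G1CollectedHeap* g1h, HeapRegion* start_hr) {
      size_t words = 0;
      for (HeapRegion* hr = start_hr; hr != NULL; hr = g1h->next_region_in_humongous(hr)) {
        words += pointer_delta(hr->top(), hr->bottom());
      }
      return words; // equals oop(start_hr->bottom())->size() given the top() invariant
    }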
 
 // Attempts to mark the given object and, if successful, counts
@@ -141,10 +139,9 @@
                                                HeapRegion* hr,
                                                size_t* marked_bytes_array,
                                                BitMap* task_card_bm) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
+  if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
     // Update the task specific count data for the object.
-    count_object(obj, hr, marked_bytes_array, task_card_bm);
+    count_object(obj, hr, marked_bytes_array, task_card_bm, obj->size());
     return true;
   }
   return false;
@@ -157,10 +154,10 @@
                                                size_t word_size,
                                                HeapRegion* hr,
                                                uint worker_id) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
-    MemRegion mr(addr, word_size);
-    count_region(mr, hr, worker_id);
+  if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
+    size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+    BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+    count_object(obj, hr, marked_bytes_array, task_card_bm, word_size);
     return true;
   }
   return false;
@@ -351,7 +348,7 @@
       // Only get the containing region if the object is not marked on the
       // bitmap (otherwise, it's a waste of time since we won't do
       // anything with it).
-      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
       if (!hr->obj_allocated_since_next_marking(obj)) {
         make_reference_grey(obj, hr);
       }
@@ -371,7 +368,7 @@
   assert(obj != NULL, "pre-condition");
   HeapWord* addr = (HeapWord*) obj;
   if (hr == NULL) {
-    hr = _g1h->heap_region_containing_raw(addr);
+    hr = _g1h->heap_region_containing(addr);
   } else {
     assert(hr->is_in(addr), "pre-condition");
   }
@@ -380,16 +377,6 @@
   // header it's impossible to get back a HC region.
   assert(!hr->is_continues_humongous(), "sanity");
 
-  // We cannot assert that word_size == obj->size() given that obj
-  // might not be in a consistent state (another thread might be in
-  // the process of copying it). So the best thing we can do is to
-  // assert that word_size is under an upper bound which is its
-  // containing region's capacity.
-  assert(word_size * HeapWordSize <= hr->capacity(),
-         "size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
-         word_size * HeapWordSize, hr->capacity(),
-         HR_FORMAT_PARAMS(hr));
-
   if (addr < hr->next_top_at_mark_start()) {
     if (!_nextMarkBitMap->isMarked(addr)) {
       par_mark_and_count(obj, word_size, hr, worker_id);
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -499,18 +499,14 @@
   return _next_offset_threshold;
 }
 
-void
-G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
-  assert(new_top <= _end, "_end should have already been updated");
-
+void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) {
   // The first BOT entry should have offset 0.
   reset_bot();
-  alloc_block(_bottom, new_top);
+  alloc_block(_bottom, obj_top);
  }
 
 #ifndef PRODUCT
-void
-G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
+void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
   G1BlockOffsetArray::print_on(out);
   out->print_cr("  next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
   out->print_cr("  next offset index:     " SIZE_FORMAT, _next_offset_index);
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -361,17 +361,18 @@
   // implementation, that's true because NULL is represented as 0, and thus
   // never exceeds the "_next_offset_threshold".
   void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
-    if (blk_end > _next_offset_threshold)
+    if (blk_end > _next_offset_threshold) {
       alloc_block_work1(blk_start, blk_end);
+    }
   }
   void alloc_block(HeapWord* blk, size_t size) {
-     alloc_block(blk, blk+size);
+    alloc_block(blk, blk+size);
   }
 
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  void set_for_starts_humongous(HeapWord* new_top);
+  void set_for_starts_humongous(HeapWord* obj_top);
 
   virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -123,7 +123,6 @@
     // to go back by.
     size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
     q -= (N_words * n_cards_back);
-    assert(q >= gsp()->bottom(), "Went below bottom!");
     index -= n_cards_back;
     offset = _array->offset_array(index);
   }
--- a/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -36,7 +36,7 @@
   T oop_or_narrowoop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(oop_or_narrowoop)) {
     oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
-    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+    HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
   }
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -320,12 +320,8 @@
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
-  // This will be the new end of the first region in the series that
-  // should also match the end of the last region in the series.
-  HeapWord* new_end = new_obj + word_size_sum;
-  // This will be the new top of the first region that will reflect
-  // this allocation.
-  HeapWord* new_top = new_obj + word_size;
+  // This will be the top of the new object, i.e. the address just past its end.
+  HeapWord* obj_top = new_obj + word_size;
 
   // First, we need to zero the header of the space that we will be
   // allocating. When we update top further down, some refinement
@@ -346,7 +342,7 @@
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
-  first_hr->set_starts_humongous(new_top, new_end);
+  first_hr->set_starts_humongous(obj_top);
   first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
@@ -356,9 +352,6 @@
     hr->set_continues_humongous(first_hr);
     hr->set_allocation_context(context);
   }
-  // If we have "continues humongous" regions (hr != NULL), then the
-  // end of the last one should match new_end.
-  assert(hr == NULL || hr->end() == new_end, "sanity");
 
   // Up to this point no concurrent thread would have been able to
   // do any scanning on any region in this series. All the top
@@ -371,58 +364,39 @@
 
   // Now that the BOT and the object header have been initialized,
   // we can update top of the "starts humongous" region.
-  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
-         "new_top should be in this region");
-  first_hr->set_top(new_top);
+  first_hr->set_top(MIN2(first_hr->end(), obj_top));
   if (_hr_printer.is_active()) {
-    HeapWord* bottom = first_hr->bottom();
-    HeapWord* end = first_hr->orig_end();
-    if ((first + 1) == last) {
-      // the series has a single humongous region
-      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
-    } else {
-      // the series has more than one humongous regions
-      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
-    }
+    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
   }
 
   // Now, we will update the top fields of the "continues humongous"
-  // regions. The reason we need to do this is that, otherwise,
-  // these regions would look empty and this will confuse parts of
-  // G1. For example, the code that looks for a consecutive number
-  // of empty regions will consider them empty and try to
-  // re-allocate them. We can extend is_empty() to also include
-  // !is_continues_humongous(), but it is easier to just update the top
-  // fields here. The way we set top for all regions (i.e., top ==
-  // end for all regions but the last one, top == new_top for the
-  // last one) is actually used when we will free up the humongous
-  // region in free_humongous_region().
+  // regions.
   hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
-      assert(hr->bottom() < new_top && new_top <= hr->end(),
+      assert(hr->bottom() < obj_top && obj_top <= hr->end(),
              "new_top should fall on this region");
-      hr->set_top(new_top);
-      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
+      hr->set_top(obj_top);
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top);
     } else {
       // not last one
-      assert(new_top > hr->end(), "new_top should be above this region");
+      assert(obj_top > hr->end(), "obj_top should be above this region");
       hr->set_top(hr->end());
       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
     }
   }
-  // If we have continues humongous regions (hr != NULL), then the
-  // end of the last one should match new_end and its top should
-  // match new_top.
-  assert(hr == NULL ||
-         (hr->end() == new_end && hr->top() == new_top), "sanity");
+  // If we have continues humongous regions (hr != NULL), the top of the
+  // last one should match obj_top.
+  assert(hr == NULL || (hr->top() == obj_top), "sanity");
   check_bitmaps("Humongous Region Allocation", first_hr);
 
-  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  increase_used(first_hr->used());
-  _humongous_set.add(first_hr);
+  increase_used(word_size * HeapWordSize);
+
+  for (uint i = first; i < last; ++i) {
+    _humongous_set.add(region_at(i));
+  }
 
   return new_obj;
 }
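A quick numeric walk-through of the sizing above (all numbers hypothetical): with 1 MB regions and 8-byte HeapWords, GrainWords is 131072, so an object of word_size = 300000 words spans three regions and obj_top lands 37856 words into the third; the first two regions get top == end and only the last region's top is obj_top.

    // Hypothetical sizing example mirroring the allocation code above.
    const size_t grain_words   = 131072; // 1 MB regions, 8-byte HeapWords
    const size_t word_size     = 300000; // humongous object size in words
    const size_t num_regions   = (word_size + grain_words - 1) / grain_words; // 3
    const size_t words_in_last = word_size - (num_regions - 1) * grain_words; // 37856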
@@ -1139,15 +1113,15 @@
   bool doHeapRegion(HeapRegion* r) {
     HeapRegionRemSet* hrrs = r->rem_set();
 
+    _g1h->reset_gc_time_stamps(r);
+
     if (r->is_continues_humongous()) {
       // We'll assert that the strong code root list and RSet is empty
       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
       assert(hrrs->occupied() == 0, "RSet should be empty");
-      return false;
+    } else {
+      hrrs->clear();
     }
-
-    _g1h->reset_gc_time_stamps(r);
-    hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region.  But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
@@ -1205,12 +1179,7 @@
     if (hr->is_free()) {
       // We only generate output for non-empty regions.
     } else if (hr->is_starts_humongous()) {
-      if (hr->region_num() == 1) {
-        // single humongous region
-        _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
-      } else {
-        _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
-      }
+      _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
     } else if (hr->is_archive()) {
@@ -2217,17 +2186,7 @@
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  assert(!hr->is_continues_humongous(), "pre-condition");
   hr->reset_gc_time_stamp();
-  if (hr->is_starts_humongous()) {
-    uint first_index = hr->hrm_index() + 1;
-    uint last_index = hr->last_hc_index();
-    for (uint i = first_index; i < last_index; i += 1) {
-      HeapRegion* chr = region_at(i);
-      assert(chr->is_continues_humongous(), "sanity");
-      chr->reset_gc_time_stamp();
-    }
-  }
 }
 
 #ifndef PRODUCT
@@ -2295,9 +2254,7 @@
 public:
   SumUsedClosure() : _used(0) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      _used += r->used();
-    }
+    _used += r->used();
     return false;
   }
   size_t result() { return _used; }
@@ -2518,9 +2475,9 @@
 bool G1CollectedHeap::is_in(const void* p) const {
   if (_hrm.reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
-    // heap_region_containing_raw() should successfully
+    // heap_region_containing() should successfully
     // return the containing region.
-    HeapRegion* hr = heap_region_containing_raw(p);
+    HeapRegion* hr = heap_region_containing(p);
     return hr->is_in(p);
   } else {
     return false;
@@ -3057,7 +3014,7 @@
       r->verify(_vo, &failures);
       if (failures) {
         _failures = true;
-      } else {
+      } else if (!r->is_starts_humongous()) {
         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
         r->object_iterate(&not_dead_yet_cl);
         if (_vo != VerifyOption_G1UseNextMarking) {
@@ -5309,30 +5266,16 @@
 }
 
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
-                                     FreeRegionList* free_list,
-                                     bool par) {
-  assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
+                                            FreeRegionList* free_list,
+                                            bool par) {
+  assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
-
-  size_t hr_capacity = hr->capacity();
-  // We need to read this before we make the region non-humongous,
-  // otherwise the information will be gone.
-  uint last_index = hr->last_hc_index();
   hr->clear_humongous();
   free_region(hr, free_list, par);
-
-  uint i = hr->hrm_index() + 1;
-  while (i < last_index) {
-    HeapRegion* curr_hr = region_at(i);
-    assert(curr_hr->is_continues_humongous(), "invariant");
-    curr_hr->clear_humongous();
-    free_region(curr_hr, free_list, par);
-    i += 1;
-  }
 }
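free_humongous_region() now frees exactly one region, so a caller that wants to reclaim a whole humongous object walks the chain itself, as the eager-reclaim closure later in this file does. The generic caller pattern, using only names from this changeset:

    // Free every region of the humongous object starting at r; fetch the
    // successor before freeing, since freeing clears the humongous linkage.
    do {
      HeapRegion* next = g1h->next_region_in_humongous(r);
      g1h->free_humongous_region(r, free_list, false /* par */);
      r = next;
    } while (r != NULL);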
 
 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
-                                       const HeapRegionSetCount& humongous_regions_removed) {
+                                           const HeapRegionSetCount& humongous_regions_removed) {
   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _old_set.bulk_remove(old_regions_removed);
@@ -5492,8 +5435,6 @@
   bool failures() { return _failures; }
 
   virtual bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) return false;
-
     bool result = _g1h->verify_bitmaps(_caller, hr);
     if (!result) {
       _failures = true;
@@ -5767,11 +5708,10 @@
         !r->rem_set()->is_empty()) {
 
       if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+        gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
-                               r->region_num(),
                                r->rem_set()->occupied(),
                                r->rem_set()->strong_code_roots_list_length(),
                                next_bitmap->isMarked(r->bottom()),
@@ -5788,11 +5728,10 @@
               PTR_FORMAT " is not.", p2i(r->bottom()));
 
     if (G1TraceEagerReclaimHumongousObjects) {
-      gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+      gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                              region_idx,
                              (size_t)obj->size() * HeapWordSize,
                              p2i(r->bottom()),
-                             r->region_num(),
                              r->rem_set()->occupied(),
                              r->rem_set()->strong_code_roots_list_length(),
                              next_bitmap->isMarked(r->bottom()),
@@ -5804,10 +5743,14 @@
     if (next_bitmap->isMarked(r->bottom())) {
       next_bitmap->clear(r->bottom());
     }
-    _freed_bytes += r->used();
-    r->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, r->capacity());
-    g1h->free_humongous_region(r, _free_region_list, false);
+    do {
+      HeapRegion* next = g1h->next_region_in_humongous(r);
+      _freed_bytes += r->used();
+      r->set_containing_set(NULL);
+      _humongous_regions_removed.increment(1u, r->capacity());
+      g1h->free_humongous_region(r, _free_region_list, false);
+      r = next;
+    } while (r != NULL);
 
     return false;
   }
@@ -6042,10 +5985,6 @@
   }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->is_continues_humongous()) {
-      return false;
-    }
-
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
@@ -6233,14 +6172,10 @@
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      return false;
-    }
-
     if (hr->is_young()) {
       // TODO
-    } else if (hr->is_starts_humongous()) {
-      assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
+    } else if (hr->is_humongous()) {
+      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -1178,7 +1178,6 @@
   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
 
-  // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
 #ifdef ASSERT
   // Returns whether p is in one of the available areas of the heap. Slow but
@@ -1243,6 +1242,10 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of, or NULL if hr is its last region.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
+
   // Calculate the region index of the given address. Given address must be
   // within the heap.
   inline uint addr_to_region(HeapWord* addr) const;
@@ -1280,11 +1283,6 @@
 
   // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
-
-  // Returns the HeapRegion that contains addr. addr must not be NULL.
-  // If addr is within a humongous continues region, it returns its humongous start region.
-  template <class T>
   inline HeapRegion* heap_region_containing(const T addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -65,6 +65,10 @@
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
+inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
+  return _hrm.next_region_in_humongous(hr);
+}
+
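The HeapRegionManager side of this call is in heapRegionManager.inline.hpp, whose diff is not shown in this section. Based on how callers use it (walking a humongous chain until NULL), a plausible sketch of the body, offered as an assumption rather than the committed code:

    // Sketch: return the region following hr if it continues the same
    // humongous object, otherwise NULL (end of the chain).
    inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
      assert(hr->is_humongous(), "should only be called for humongous regions");
      uint index = hr->hrm_index() + 1;
      if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
        return at(index);
      }
      return NULL;
    }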
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
@@ -77,7 +81,7 @@
 }
 
 template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   assert(addr != NULL, "invariant");
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
@@ -85,15 +89,6 @@
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = heap_region_containing_raw(addr);
-  if (hr->is_continues_humongous()) {
-    return hr->humongous_start_region();
-  }
-  return hr;
-}
-
 inline void G1CollectedHeap::reset_gc_time_stamp() {
   _gc_time_stamp = 0;
   OrderAccess::fence();
@@ -124,9 +119,9 @@
   assert_heap_not_locked();
 
   // Assign the containing region to containing_hr so that we don't
-  // have to keep calling heap_region_containing_raw() in the
+  // have to keep calling heap_region_containing() in the
   // asserts below.
-  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
   assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
--- a/hotspot/src/share/vm/gc/g1/g1HRPrinter.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1HRPrinter.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -51,7 +51,6 @@
     case Eden:               return "Eden";
     case Survivor:           return "Survivor";
     case Old:                return "Old";
-    case SingleHumongous:    return "SingleH";
     case StartsHumongous:    return "StartsH";
     case ContinuesHumongous: return "ContinuesH";
     case Archive:            return "Archive";
--- a/hotspot/src/share/vm/gc/g1/g1HRPrinter.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1HRPrinter.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -50,7 +50,6 @@
     Eden,
     Survivor,
     Old,
-    SingleHumongous,
     StartsHumongous,
     ContinuesHumongous,
     Archive
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -279,8 +279,8 @@
         } else {
           assert(hr->is_empty(), "Should have been cleared in phase 2.");
         }
-        hr->reset_during_compaction();
       }
+      hr->reset_during_compaction();
     } else if (!hr->is_pinned()) {
       hr->compact();
     }
@@ -334,9 +334,6 @@
   HeapWord* end = hr->end();
   FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
 
-  assert(hr->is_starts_humongous(),
-         "Only the start of a humongous region should be freed.");
-
   hr->set_containing_set(NULL);
   _humongous_regions_removed.increment(1u, hr->capacity());
 
@@ -373,15 +370,12 @@
 
 bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
   if (hr->is_humongous()) {
-    if (hr->is_starts_humongous()) {
-      oop obj = oop(hr->bottom());
-      if (obj->is_gc_marked()) {
-        obj->forward_to(obj);
-      } else  {
-        free_humongous_region(hr);
-      }
-    } else {
-      assert(hr->is_continues_humongous(), "Invalid humongous.");
+    oop obj = oop(hr->humongous_start_region()->bottom());
+    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
+      obj->forward_to(obj);
+    }
+    if (!obj->is_gc_marked()) {
+      free_humongous_region(hr);
     }
   } else if (!hr->is_pinned()) {
     prepare_for_compaction(hr, hr->end());
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -222,7 +222,7 @@
 
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
-  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+  if (_g1->heap_region_containing(new_obj)->is_young()) {
     _scanned_klass->record_modified_oops();
   }
 }
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -216,7 +216,7 @@
                                                  oop const old,
                                                  markOop const old_mark) {
   const size_t word_sz = old->size();
-  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
+  HeapRegion* const from_region = _g1h->heap_region_containing(old);
   // +1 to make the -1 indexes valid...
   const int young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index >  0) ||
@@ -294,9 +294,9 @@
     if (G1StringDedup::is_enabled()) {
       const bool is_from_young = state.is_young();
       const bool is_to_young = dest_state.is_young();
-      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
              "sanity");
-      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
              "sanity");
       G1StringDedup::enqueue_from_evacuation(is_from_young,
                                              is_to_young,
@@ -314,7 +314,7 @@
       oop* old_p = set_partial_array_mask(old);
       push_on_queue(old_p);
     } else {
-      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+      HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
       _scanner.set_region(to_region);
       obj->oop_iterate_backwards(&_scanner);
     }
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -101,7 +101,7 @@
     // so that the heap remains parsable in case of evacuation failure.
     to_obj_array->set_length(end);
   }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  _scanner.set_region(_g1h->heap_region_containing(to_obj));
   // Process indexes [start,end). It will also process the header
   // along with the first chunk (i.e., the chunk with start == 0).
   // Note that at this point the length field of to_obj_array is not
@@ -115,10 +115,7 @@
 
 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
   if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    HeapRegion* r = _g1h->heap_region_containing(ref_to_scan);
     do_oop_evac(ref_to_scan, r);
   } else {
     do_oop_partial_array((oop*)ref_to_scan);
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -60,7 +60,7 @@
   assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
-  assert(from == NULL || from->is_in_reserved(p), "p is not in from");
+  assert(from->is_in_reserved(p) || from->is_starts_humongous(), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (from != to) {
--- a/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -52,7 +52,7 @@
 
 bool G1StringDedup::is_candidate_from_mark(oop obj) {
   if (java_lang_String::is_instance_inlined(obj)) {
-    bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
+    bool from_young = G1CollectedHeap::heap()->heap_region_containing(obj)->is_young();
     if (from_young && obj->age() < StringDeduplicationAgeThreshold) {
       // Candidate found. String is being evacuated from young to old but has not
       // reached the deduplication age threshold, i.e. has not previously been a
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -67,7 +67,7 @@
   // not considered dead, either because it is marked (in the mark bitmap)
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
-  if (!g1h->is_obj_dead(oop(cur), _hr)) {
+  if (!g1h->is_obj_dead(oop(cur))) {
     oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
   } else {
     oop_size = _hr->block_size(cur);
@@ -81,7 +81,7 @@
     HeapWord* next_obj = cur + oop_size;
     while (next_obj < top) {
       // Keep filtering the remembered set.
-      if (!g1h->is_obj_dead(cur_oop, _hr)) {
+      if (!g1h->is_obj_dead(cur_oop)) {
         // Bottom lies entirely below top, so we can call the
         // non-memRegion version of oop_iterate below.
         cur_oop->oop_iterate(_rs_scan);
@@ -93,7 +93,7 @@
     }
 
     // Last object. Need to do dead-obj filtering here too.
-    if (!g1h->is_obj_dead(oop(cur), _hr)) {
+    if (!g1h->is_obj_dead(oop(cur))) {
       oop(cur)->oop_iterate(_rs_scan, mr);
     }
   }
@@ -162,8 +162,6 @@
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
-  assert(_end == orig_end(),
-         "we should have already filtered out humongous regions");
   assert(!in_collection_set(),
          "Should not clear heap region %u in the collection set", hrm_index());
 
@@ -213,24 +211,18 @@
   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }
 
-void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
+void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
   assert(!is_humongous(), "sanity / pre-condition");
-  assert(end() == orig_end(),
-         "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
   _type.set_starts_humongous();
   _humongous_start_region = this;
 
-  set_end(new_end);
-  _offsets.set_for_starts_humongous(new_top);
+  _offsets.set_for_starts_humongous(obj_top);
 }
 
 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
   assert(!is_humongous(), "sanity / pre-condition");
-  assert(end() == orig_end(),
-         "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(first_hr->is_starts_humongous(), "pre-condition");
 
@@ -241,18 +233,6 @@
 void HeapRegion::clear_humongous() {
   assert(is_humongous(), "pre-condition");
 
-  if (is_starts_humongous()) {
-    assert(top() <= end(), "pre-condition");
-    set_end(orig_end());
-    if (top() > end()) {
-      // at least one "continues humongous" region after it
-      set_top(end());
-    }
-  } else {
-    // continues humongous
-    assert(end() == orig_end(), "sanity");
-  }
-
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
   _humongous_start_region = NULL;
 }
@@ -290,11 +270,6 @@
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_timestamp();
-
-  assert(mr.end() == orig_end(),
-         "Given region end address " PTR_FORMAT " should match exactly "
-         "bottom plus one region size, i.e. " PTR_FORMAT,
-         p2i(mr.end()), p2i(orig_end()));
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -832,7 +807,14 @@
     _offsets.verify();
   }
 
-  if (p != top()) {
+  if (is_region_humongous) {
+    oop obj = oop(this->humongous_start_region()->bottom());
+    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
+      gclog_or_tty->print_cr("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
+    }
+  }
+
+  if (!is_region_humongous && p != top()) {
     gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                            "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
     *failures = true;
@@ -840,7 +822,6 @@
   }
 
   HeapWord* the_end = end();
-  assert(p == top(), "it should still hold");
   // Do some extra BOT consistency checking for addresses in the
   // range [top, end). BOT look-ups in this range should yield
   // top. No point in doing that if top == end (there's nothing there).
@@ -931,6 +912,7 @@
 }
 
 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
+  assert(new_end == _bottom + HeapRegion::GrainWords, "new_end should only ever be _bottom + HeapRegion::GrainWords");
   Space::set_end(new_end);
   _offsets.resize(new_end - bottom());
 }
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -43,6 +43,15 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
+// Each heap region is self contained. top() and end() can never
+// be set beyond the end of the region. For humongous objects,
+// the first region is a StartsHumongous region. If the humongous
+// object is larger than a heap region, the following regions will
+// be of type ContinuesHumongous. In this case the top() of the
+// StartsHumongous region and of all ContinuesHumongous regions
+// except the last will point to their own end. For the last
+// ContinuesHumongous region, top() will equal the object's top.
+
 class G1CollectedHeap;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
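A hedged way to express the comment block above as code (helper name hypothetical; the accessors are the ones this changeset uses):

    // Walks a humongous chain and asserts the top() layout described above.
    static void verify_humongous_layout(G1CollectedHeap* g1h, HeapRegion* start_hr) {
      HeapRegion* hr = start_hr;
      while (g1h->next_region_in_humongous(hr) != NULL) {
        assert(hr->top() == hr->end(), "all regions before the last are fully covered");
        hr = g1h->next_region_in_humongous(hr);
      }
      // The last region's top is the humongous object's own end.
      oop obj = oop(start_hr->bottom());
      assert(hr->top() == start_hr->bottom() + obj->size(), "last top is the object's top");
    }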
@@ -389,8 +398,6 @@
   size_t garbage_bytes() {
     size_t used_at_mark_start_bytes =
       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
-    assert(used_at_mark_start_bytes >= marked_bytes(),
-           "Can't mark more than we have.");
     return used_at_mark_start_bytes - marked_bytes();
   }
 
@@ -409,7 +416,6 @@
 
   void add_to_marked_bytes(size_t incr_bytes) {
     _next_marked_bytes = _next_marked_bytes + incr_bytes;
-    assert(_next_marked_bytes <= used(), "invariant" );
   }
 
   void zero_marked_bytes()      {
@@ -445,57 +451,13 @@
     return _humongous_start_region;
   }
 
-  // Return the number of distinct regions that are covered by this region:
-  // 1 if the region is not humongous, >= 1 if the region is humongous.
-  uint region_num() const {
-    if (!is_humongous()) {
-      return 1U;
-    } else {
-      assert(is_starts_humongous(), "doesn't make sense on HC regions");
-      assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
-      return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
-    }
-  }
-
-  // Return the index + 1 of the last HC regions that's associated
-  // with this HS region.
-  uint last_hc_index() const {
-    assert(is_starts_humongous(), "don't call this otherwise");
-    return hrm_index() + region_num();
-  }
-
-  // Same as Space::is_in_reserved, but will use the original size of the region.
-  // The original size is different only for start humongous regions. They get
-  // their _end set up to be the end of the last continues region of the
-  // corresponding humongous object.
-  bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < orig_end();
-  }
-
   // Makes the current region be a "starts humongous" region, i.e.,
   // the first region in a series of one or more contiguous regions
-  // that will contain a single "humongous" object. The two parameters
-  // are as follows:
-  //
-  // new_top : The new value of the top field of this region which
-  // points to the end of the humongous object that's being
-  // allocated. If there is more than one region in the series, top
-  // will lie beyond this region's original end field and on the last
-  // region in the series.
+  // that will contain a single "humongous" object.
   //
-  // new_end : The new value of the end field of this region which
-  // points to the end of the last region in the series. If there is
-  // one region in the series (namely: this one) end will be the same
-  // as the original end of this region.
-  //
-  // Updating top and end as described above makes this region look as
-  // if it spans the entire space taken up by all the regions in the
-  // series and an single allocation moved its top to new_top. This
-  // ensures that the space (capacity / allocated) taken up by all
-  // humongous regions can be calculated by just looking at the
-  // "starts humongous" regions and by ignoring the "continues
-  // humongous" regions.
-  void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
+  // obj_top : points to the end of the humongous object that's being
+  // allocated.
+  void set_starts_humongous(HeapWord* obj_top);
 
   // Makes the current region be a "continues humongous'
   // region. first_hr is the "start humongous" region of the series
@@ -566,9 +528,6 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* orig_end() const { return _bottom + GrainWords; }
-
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
   void par_clear();
@@ -614,8 +573,8 @@
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
   void reset_during_compaction() {
-    assert(is_starts_humongous(),
-           "should only be called for starts humongous regions");
+    assert(is_humongous(),
+           "should only be called for humongous regions");
 
     zero_marked_bytes();
     init_top_at_mark_start();
--- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -115,6 +115,11 @@
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  if (!this->is_in(p)) {
+    assert(is_continues_humongous(), "This case can only happen for humongous regions");
+    return (p == humongous_start_region()->bottom());
+  }
   if (ClassUnloadingWithConcurrentMark) {
     return !g1h->is_obj_dead(oop(p), this);
   }
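With this change block_is_obj() can be asked about an address outside the receiving region: for a ContinuesHumongous region the only valid block start is the chain's first region's bottom. A hedged usage sketch (function name hypothetical):

    // For a continues-humongous region, the single address for which
    // block_is_obj() answers true without being in the region is the
    // humongous object's own start.
    static void check_continues_humongous_block(const HeapRegion* ch) {
      assert(ch->is_continues_humongous(), "pre-condition");
      HeapWord* obj_start = ch->humongous_start_region()->bottom();
      assert(ch->block_is_obj(obj_start), "the object's start is the one valid block");
    }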
@@ -176,10 +181,6 @@
   _prev_top_at_mark_start = _next_top_at_mark_start;
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
-
-  assert(_prev_marked_bytes <=
-         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
-         HeapWordSize, "invariant");
 }
 
 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -343,63 +343,14 @@
       continue;
     }
     HeapRegion* r = _regions.get_by_index(index);
-    // We'll ignore "continues humongous" regions (we'll process them
-    // when we come across their corresponding "start humongous"
-    // region) and regions already claimed.
-    // However, if the iteration is specified as concurrent, the values for
-    // is_starts_humongous and is_continues_humongous can not be trusted,
-    // and we should just blindly iterate over regions regardless of their
-    // humongous status.
-    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
+    // We'll ignore regions already claimed.
+    if (hrclaimer->is_region_claimed(index)) {
       continue;
     }
     // OK, try to claim it
     if (!hrclaimer->claim_region(index)) {
       continue;
     }
-    // Success!
-    // As mentioned above, special treatment of humongous regions can only be
-    // done if we are iterating non-concurrently.
-    if (!concurrent && r->is_starts_humongous()) {
-      // If the region is "starts humongous" we'll iterate over its
-      // "continues humongous" first; in fact we'll do them
-      // first. The order is important. In one case, calling the
-      // closure on the "starts humongous" region might de-allocate
-      // and clear all its "continues humongous" regions and, as a
-      // result, we might end up processing them twice. So, we'll do
-      // them first (note: most closures will ignore them anyway) and
-      // then we'll do the "starts humongous" region.
-      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
-        HeapRegion* chr = _regions.get_by_index(ch_index);
-
-        assert(chr->is_continues_humongous(), "Must be humongous region");
-        assert(chr->humongous_start_region() == r,
-               "Must work on humongous continuation of the original start region "
-               PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr));
-        assert(!hrclaimer->is_region_claimed(ch_index),
-               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
-
-        // Claim the region so no other worker tries to process the region. When a worker processes a
-        // starts_humongous region it may also process the associated continues_humongous regions.
-        // The continues_humongous regions can be changed to free regions. Unless this worker claims
-        // all of these regions, other workers might try claim and process these newly free regions.
-        bool claim_result = hrclaimer->claim_region(ch_index);
-        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
-
-        bool res2 = blk->doHeapRegion(chr);
-        if (res2) {
-          return;
-        }
-
-        // Right now, this holds (i.e., no closure that actually
-        // does something with "continues humongous" regions
-        // clears them). We might have to weaken it in the future,
-        // but let's leave these two asserts here for extra safety.
-        assert(chr->is_continues_humongous(), "should still be the case");
-        assert(chr->humongous_start_region() == r, "sanity");
-      }
-    }
-
     bool res = blk->doHeapRegion(r);
     if (res) {
       return;
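
With the per-series processing above removed, every region is claimed
independently through the claimer, humongous or not, and closures see each
region of a humongous object on its own. A self-contained model of this
one-flag-per-region claiming (std::atomic stands in for the real
HeapRegionClaimer, which is an assumption of the sketch):

    // Not HotSpot code: one atomic claim flag per region.
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static bool claim_region(std::vector<std::atomic<bool> >& claimed, size_t i) {
      bool expected = false;
      // Exactly one caller per region wins, mirroring claim_region(index).
      return claimed[i].compare_exchange_strong(expected, true);
    }

    int main() {
      std::vector<std::atomic<bool> > claimed(4);
      for (size_t i = 0; i < claimed.size(); i++) {
        claimed[i].store(false); // atomics are not copyable; init in place
      }
      bool first = claim_region(claimed, 2);
      bool second = claim_region(claimed, 2);
      printf("%d %d\n", first, second); // prints: 1 0
      return 0;
    }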
@@ -508,11 +463,7 @@
     // this method may be called, we have only completed allocation of the regions,
     // but not put into a region set.
     prev_committed = true;
-    if (hr->is_starts_humongous()) {
-      prev_end = hr->orig_end();
-    } else {
-      prev_end = hr->end();
-    }
+    prev_end = hr->end();
   }
   for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
     guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -150,6 +150,10 @@
   // is valid.
   inline HeapRegion* at(uint index) const;
 
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of, or NULL if hr is the last
+  // region of that object.
+
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.inline.hpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.inline.hpp	Mon Nov 09 09:19:39 2015 +0100
@@ -47,6 +47,18 @@
   return hr;
 }
 
+inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
+  uint index = hr->hrm_index();
+  assert(is_available(index), "pre-condition");
+  assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
+  index++;
+  if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
+    return at(index);
+  } else {
+    return NULL;
+  }
+}
+
 inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
   _free_list.add_ordered(hr);
 }
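
Because the regions of a humongous object are physically contiguous,
next_region_in_humongous() above reduces to an index increment plus a type
check. A self-contained model (simplified region representation) walking a
series the same way:

    // Not HotSpot code: model of walking a humongous series by index.
    #include <cstdio>
    #include <vector>

    enum Kind { Free, StartsH, ContinuesH };

    static int next_region_in_humongous(const std::vector<Kind>& regions, int i) {
      int next = i + 1;
      if (next < (int)regions.size() && regions[next] == ContinuesH) {
        return next;
      }
      return -1; // i was the last region of the series
    }

    int main() {
      std::vector<Kind> heap;
      heap.push_back(Free);
      heap.push_back(StartsH);
      heap.push_back(ContinuesH);
      heap.push_back(ContinuesH);
      heap.push_back(Free);
      for (int i = 1; i != -1; i = next_region_in_humongous(heap, i)) {
        printf("region %d\n", i); // visits regions 1, 2, 3
      }
      return 0;
    }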
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -105,7 +105,7 @@
     // now reused for the corresponding start humongous region, we need to
-    // make sure that we detect this. Thus, we call is_in_reserved_raw()
-    // instead of just is_in_reserved() here.
-    if (loc_hr->is_in_reserved_raw(from)) {
+    // make sure that we detect this. Since a region's end is now fixed
+    // and never extended, the regular is_in_reserved() check suffices.
+    if (loc_hr->is_in_reserved(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
@@ -433,7 +433,7 @@
   }
 
   // Note that this may be a continued H region.
-  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
+  HeapRegion* from_hr = _g1h->heap_region_containing(from);
   RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
 
   // If the region is already coarsened, return.
@@ -765,7 +765,7 @@
 }
 
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
-  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
+  HeapRegion* hr = _g1h->heap_region_containing(from);
   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
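
With no region's end ever extended past its own grain, the plain
heap_region_containing() lookup is exact for every address, which is why the
_raw calls above could be folded into it. A minimal model of the underlying
address-to-region mapping (power-of-two region size and a 64-bit build are
assumptions of the sketch):

    // Not HotSpot code: address-to-region index by shift arithmetic.
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t LogRegionBytes = 20;        // assumed 1M regions
    static const uintptr_t heap_base = 0x100000000ULL; // assumed heap start

    static uintptr_t region_index(uintptr_t addr) {
      return (addr - heap_base) >> LogRegionBytes;
    }

    int main() {
      uintptr_t addr = heap_base + (3ULL << LogRegionBytes) + 42;
      printf("%lu\n", (unsigned long)region_index(addr)); // prints: 3
      return 0;
    }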
--- a/hotspot/src/share/vm/gc/g1/satbQueue.cpp	Fri Nov 06 09:58:06 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/satbQueue.cpp	Mon Nov 09 09:19:39 2015 +0100
@@ -88,7 +88,7 @@
   assert(heap->is_in_reserved(entry),
          "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
 
-  HeapRegion* region = heap->heap_region_containing_raw(entry);
+  HeapRegion* region = heap->heap_region_containing(entry);
   assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
   if (entry >= region->next_top_at_mark_start()) {
     return false;
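
The retained filter above compares each SATB entry against the region's
next_top_at_mark_start() (TAMS): anything allocated at or above TAMS during
marking is implicitly live, so the entry can be dropped from the buffer. A
minimal model of that test (names simplified, not HotSpot code):

    // Not HotSpot code: keep only entries allocated before marking began.
    #include <cstdio>

    static bool retain_entry(const char* entry, const char* tams) {
      return entry < tams; // at or above TAMS: implicitly live, discard
    }

    int main() {
      char region[100];
      const char* tams = region + 60;
      printf("%d %d\n", retain_entry(region + 10, tams),  // 1: keep
                        retain_entry(region + 80, tams)); // 0: discard
      return 0;
    }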