Merge
author jwilhelm
Thu, 18 Sep 2014 18:19:44 +0200
changeset 26823 e4d9c51281a1
parent 26691 40ea2c41f53b (current diff)
parent 26703 a5739f4f8eca (diff)
child 26824 a04a1291103f
Merge
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/solaris/vm/os_solaris.hpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java
hotspot/test/gc/g1/TestDeferredRSUpdate.java
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -4002,10 +4002,6 @@
   return true;
 }
 
-// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
-//   return ::vsnprintf(buf, count, format, args);
-// }
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -3822,12 +3822,6 @@
   return true;
 }
 
-ATTRIBUTE_PRINTF(3, 0)
-int local_vsnprintf(char* buf, size_t count, const char* format,
-                    va_list args) {
-  return ::vsnprintf(buf, count, format, args);
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -5063,11 +5063,6 @@
   return true;
 }
 
-int local_vsnprintf(char* buf, size_t count, const char* format,
-                    va_list args) {
-  return ::vsnprintf(buf, count, format, args);
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -2539,29 +2539,30 @@
   }
 }
 
+size_t os::Solaris::page_size_for_alignment(size_t alignment) {
+  assert(is_size_aligned(alignment, (size_t) vm_page_size()),
+         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
+                 alignment, (size_t) vm_page_size()));
+
+  for (int i = 0; _page_sizes[i] != 0; i++) {
+    if (is_size_aligned(alignment, _page_sizes[i])) {
+      return _page_sizes[i];
+    }
+  }
+
+  return (size_t) vm_page_size();
+}
+
 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                     size_t alignment_hint, bool exec) {
   int err = Solaris::commit_memory_impl(addr, bytes, exec);
-  if (err == 0) {
-    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
-      // If the large page size has been set and the VM
-      // is using large pages, use the large page size
-      // if it is smaller than the alignment hint. This is
-      // a case where the VM wants to use a larger alignment size
-      // for its own reasons but still want to use large pages
-      // (which is what matters to setting the mpss range.
-      size_t page_size = 0;
-      if (large_page_size() < alignment_hint) {
-        assert(UseLargePages, "Expected to be here for large page use only");
-        page_size = large_page_size();
-      } else {
-        // If the alignment hint is less than the large page
-        // size, the VM wants a particular alignment (thus the hint)
-        // for internal reasons.  Try to set the mpss range using
-        // the alignment_hint.
-        page_size = alignment_hint;
-      }
-      // Since this is a hint, ignore any failures.
+  if (err == 0 && UseLargePages && alignment_hint > 0) {
+    assert(is_size_aligned(bytes, alignment_hint),
+           err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
+
+    // The syscall memcntl requires an exact page size (see man memcntl for details).
+    size_t page_size = page_size_for_alignment(alignment_hint);
+    if (page_size > (size_t) vm_page_size()) {
       (void)Solaris::setup_large_pages(addr, bytes, page_size);
     }
   }
@@ -3098,8 +3099,22 @@
   }
 }
 
-bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes,
-                                    size_t align) {
+bool os::Solaris::is_valid_page_size(size_t bytes) {
+  for (int i = 0; _page_sizes[i] != 0; i++) {
+    if (_page_sizes[i] == bytes) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
+  assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
+  assert(is_ptr_aligned((void*) start, align),
+         err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
+  assert(is_size_aligned(bytes, align),
+         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
+
   // Signal to OS that we want large pages for addresses
   // from addr, addr + bytes
   struct memcntl_mha mpss_struct;
@@ -3114,8 +3129,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
-                                 bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -4769,29 +4783,6 @@
 
 bool os::check_heap(bool force) { return true; }
 
-typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
-static vsnprintf_t sol_vsnprintf = NULL;
-
-int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
-  if (!sol_vsnprintf) {
-    //search  for the named symbol in the objects that were loaded after libjvm
-    void* where = RTLD_NEXT;
-    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) {
-      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
-    }
-    if (!sol_vsnprintf){
-      //search  for the named symbol in the objects that were loaded before libjvm
-      where = RTLD_DEFAULT;
-      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) {
-        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
-      }
-      assert(sol_vsnprintf != NULL, "vsnprintf not found");
-    }
-  }
-  return (*sol_vsnprintf)(buf, count, fmt, argptr);
-}
-
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
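
The new os::Solaris::page_size_for_alignment() above walks the platform's supported page sizes and returns the largest one that evenly divides the requested alignment, falling back to the base page size. A minimal standalone sketch of that selection, assuming a hypothetical descending page-size list rather than the real _page_sizes array:

// Standalone sketch, not HotSpot code: pick the largest supported page size
// that evenly divides a requested alignment, else fall back to the base page.
#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t kBasePageSize = 4 * 1024;
// Hypothetical descending, 0-terminated list of supported page sizes.
static const size_t kPageSizes[] = { 2 * 1024 * 1024, 64 * 1024, 4 * 1024, 0 };

static size_t page_size_for_alignment(size_t alignment) {
  assert(alignment % kBasePageSize == 0 && "alignment must be page aligned");
  for (int i = 0; kPageSizes[i] != 0; i++) {
    if (alignment % kPageSizes[i] == 0) {
      return kPageSizes[i];   // largest page size dividing the alignment
    }
  }
  return kBasePageSize;
}

int main() {
  std::printf("%zu\n", page_size_for_alignment(4 * 1024 * 1024)); // 2097152
  std::printf("%zu\n", page_size_for_alignment(64 * 1024));       // 65536
  std::printf("%zu\n", page_size_for_alignment(8 * 1024));        // 4096
  return 0;
}
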
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -97,6 +97,8 @@
   static meminfo_func_t _meminfo;
 
   // Large Page Support
+  static bool is_valid_page_size(size_t bytes);
+  static size_t page_size_for_alignment(size_t alignment);
   static bool setup_large_pages(caddr_t start, size_t bytes, size_t align);
 
   static void init_thread_fpu_state(void);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -4737,7 +4737,7 @@
 }
 
 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
-  const char* type = "";
+  const char* type       = r->get_type_str();
   HeapWord* bottom       = r->bottom();
   HeapWord* end          = r->end();
   size_t capacity_bytes  = r->capacity();
@@ -4748,15 +4748,7 @@
   size_t remset_bytes    = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
 
-  if (r->used() == 0) {
-    type = "FREE";
-  } else if (r->is_survivor()) {
-    type = "SURV";
-  } else if (r->is_young()) {
-    type = "EDEN";
-  } else if (r->startsHumongous()) {
-    type = "HUMS";
-
+  if (r->startsHumongous()) {
     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
            "they should have been zeroed after the last time we used them");
@@ -4769,12 +4761,9 @@
                   &prev_live_bytes, &next_live_bytes);
     end = bottom + HeapRegion::GrainWords;
   } else if (r->continuesHumongous()) {
-    type = "HUMC";
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     assert(end == bottom + HeapRegion::GrainWords, "invariant");
-  } else {
-    type = "OLD";
   }
 
   _total_used_bytes      += used_bytes;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -211,7 +211,10 @@
     HeapRegion* next = list->get_next_young_region();
     list->set_next_young_region(NULL);
     list->uninstall_surv_rate_group();
-    list->set_not_young();
+    // This is called before a Full GC and all the non-empty /
+    // non-humongous regions at the end of the Full GC will end up as
+    // old anyway.
+    list->set_old();
     list = next;
   }
 }
@@ -370,7 +373,7 @@
     if (curr == NULL)
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
-      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
+      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
                              HR_FORMAT_PARAMS(curr),
                              curr->prev_top_at_mark_start(),
                              curr->next_top_at_mark_start(),
@@ -802,6 +805,7 @@
 #ifdef ASSERT
       for (uint i = first; i < first + obj_regions; ++i) {
         HeapRegion* hr = region_at(i);
+        assert(hr->is_free(), "sanity");
         assert(hr->is_empty(), "sanity");
         assert(is_on_master_free_list(hr), "sanity");
       }
@@ -1225,21 +1229,21 @@
 public:
   bool doHeapRegion(HeapRegion* hr) {
     assert(!hr->is_young(), "not expecting to find young regions");
-    // We only generate output for non-empty regions.
-    if (!hr->is_empty()) {
-      if (!hr->isHumongous()) {
-        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
-      } else if (hr->startsHumongous()) {
-        if (hr->region_num() == 1) {
-          // single humongous region
-          _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
-        } else {
-          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
-        }
+    if (hr->is_free()) {
+      // We only generate output for non-empty regions.
+    } else if (hr->startsHumongous()) {
+      if (hr->region_num() == 1) {
+        // single humongous region
+        _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
       } else {
-        assert(hr->continuesHumongous(), "only way to get here");
-        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+        _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
       }
+    } else if (hr->continuesHumongous()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_old()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Old);
+    } else {
+      ShouldNotReachHere();
     }
     return false;
   }
@@ -1477,9 +1481,7 @@
 
       // Discard all rset updates
       JavaThread::dirty_card_queue_set().abandon_logs();
-      assert(!G1DeferredRSUpdate
-             || (G1DeferredRSUpdate &&
-                (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
+      assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 
       _young_list->reset_sampled_info();
       // At this point there should be no regions in the
@@ -2090,15 +2092,13 @@
                                                 concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock);
 
-  if (G1DeferredRSUpdate) {
-    dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
-                                      DirtyCardQ_CBL_mon,
-                                      DirtyCardQ_FL_lock,
-                                      -1, // never trigger processing
-                                      -1, // no limit on length
-                                      Shared_DirtyCardQ_lock,
-                                      &JavaThread::dirty_card_queue_set());
-  }
+  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
+                                    DirtyCardQ_CBL_mon,
+                                    DirtyCardQ_FL_lock,
+                                    -1, // never trigger processing
+                                    -1, // no limit on length
+                                    Shared_DirtyCardQ_lock,
+                                    &JavaThread::dirty_card_queue_set());
 
   // Initialize the card queue set used to hold cards containing
   // references into the collection set.
@@ -2121,8 +2121,8 @@
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
-  // BOT updates. So we'll tag the dummy region as young to avoid that.
-  dummy_region->set_young();
+  // BOT updates. So we'll tag the dummy region as eden to avoid that.
+  dummy_region->set_eden();
   // Make sure it's full.
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
@@ -4031,14 +4031,6 @@
         if (_hr_printer.is_active()) {
           HeapRegion* hr = g1_policy()->collection_set();
           while (hr != NULL) {
-            G1HRPrinter::RegionType type;
-            if (!hr->is_young()) {
-              type = G1HRPrinter::Old;
-            } else if (hr->is_survivor()) {
-              type = G1HRPrinter::Survivor;
-            } else {
-              type = G1HRPrinter::Eden;
-            }
             _hr_printer.cset(hr);
             hr = hr->next_in_collection_set();
           }
@@ -5393,7 +5385,6 @@
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
-  guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
   double redirty_logged_cards_start = os::elapsedTime();
 
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
@@ -5448,9 +5439,10 @@
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop(oop* p) {
     oop obj = *p;
+    assert(obj != NULL, "the caller should have filtered out NULL values");
 
     G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
-    if (obj == NULL || cset_state == G1CollectedHeap::InNeither) {
+    if (cset_state == G1CollectedHeap::InNeither) {
       return;
     }
     if (cset_state == G1CollectedHeap::InCSet) {
@@ -6052,9 +6044,7 @@
   // RSets.
   enqueue_discovered_references(n_workers);
 
-  if (G1DeferredRSUpdate) {
-    redirty_logged_cards();
-  }
+  redirty_logged_cards();
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
@@ -6062,7 +6052,7 @@
                                   FreeRegionList* free_list,
                                   bool par,
                                   bool locked) {
-  assert(!hr->isHumongous(), "this is only for non-humongous regions");
+  assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
@@ -6092,14 +6082,14 @@
   // We need to read this before we make the region non-humongous,
   // otherwise the information will be gone.
   uint last_index = hr->last_hc_index();
-  hr->set_notHumongous();
+  hr->clear_humongous();
   free_region(hr, free_list, par);
 
   uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
     assert(curr_hr->continuesHumongous(), "invariant");
-    curr_hr->set_notHumongous();
+    curr_hr->clear_humongous();
     free_region(curr_hr, free_list, par);
     i += 1;
   }
@@ -6407,9 +6397,9 @@
       if (cur->is_young()) {
         cur->set_young_index_in_cset(-1);
       }
-      cur->set_not_young();
       cur->set_evacuation_failed(false);
       // The region is now considered to be old.
+      cur->set_old();
       _old_set.add(cur);
       evacuation_info.increment_collectionset_used_after(cur->used());
     }
@@ -6696,16 +6686,15 @@
   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->is_empty()) {
-      // We ignore empty regions, we'll empty the free list afterwards
-    } else if (r->is_young()) {
-      // We ignore young regions, we'll empty the young list afterwards
-    } else if (r->isHumongous()) {
+    if (r->is_old()) {
+      _old_set->remove(r);
+    } else {
+      // We ignore free regions, we'll empty the free list afterwards.
+      // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
-      // humongous region set
-    } else {
-      // The rest should be old
-      _old_set->remove(r);
+      // humongous regions set.
+      assert(r->is_free() || r->is_young() || r->isHumongous(),
+             "it cannot be another type");
     }
     return false;
   }
@@ -6755,6 +6744,7 @@
 
     if (r->is_empty()) {
       // Add free regions to the free list
+      r->set_free();
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
@@ -6762,7 +6752,11 @@
       if (r->isHumongous()) {
         // We ignore humongous regions, we left the humongous set unchanged
       } else {
-        // The rest should be old, add them to the old set
+        // Objects that were compacted would have ended up on regions
+        // that were previously old or free.
+        assert(r->is_free() || r->is_old(), "invariant");
+        // We now consider them old, so register as such.
+        r->set_old();
         _old_set->add(r);
       }
       _total_used += r->used();
@@ -6829,7 +6823,7 @@
 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                   size_t allocated_bytes) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(alloc_region->is_young(), "all mutator alloc regions should be young");
+  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
   _summary_bytes_used += allocated_bytes;
@@ -6888,6 +6882,7 @@
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
         check_bitmaps("Survivor Region Allocation", new_alloc_region);
       } else {
+        new_alloc_region->set_old();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
         check_bitmaps("Old Region Allocation", new_alloc_region);
       }
@@ -6999,9 +6994,11 @@
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
       _free_count.increment(1u, hr->capacity());
-    } else {
+    } else if (hr->is_old()) {
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
+    } else {
+      ShouldNotReachHere();
     }
     return false;
   }
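
The post-compaction closure above now dispatches on the region type exhaustively and makes the impossible case fatal, instead of defaulting every non-empty, non-humongous region to Old. A standalone sketch of that pattern, with an assumed simplified RegionType enum (the real code also distinguishes single-region humongous objects):

// Standalone sketch, assumed simplification of the HotSpot closure above:
// classify a region after a full GC and treat any leftover case as a bug.
#include <cstdio>
#include <cstdlib>

enum class RegionType { Free, Eden, Survivor, HumongousStarts, HumongousContinues, Old };

static const char* post_compaction_label(RegionType t) {
  switch (t) {
    case RegionType::Free:               return nullptr;  // free regions produce no output
    case RegionType::HumongousStarts:    return "StartsHumongous";
    case RegionType::HumongousContinues: return "ContinuesHumongous";
    case RegionType::Old:                return "Old";
    default:
      // After a full GC no eden or survivor regions should remain.
      std::fprintf(stderr, "unexpected region type\n");
      std::abort();
  }
}

int main() {
  std::printf("%s\n", post_compaction_label(RegionType::Old));  // prints: Old
  return 0;
}
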
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -1665,7 +1665,7 @@
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
   assert(_inc_cset_build_state == Active, "Precondition");
-  assert(!hr->is_young(), "non-incremental add of young region");
+  assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
   hr->set_in_collection_set(true);
@@ -1811,7 +1811,7 @@
 // Add the region at the RHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   // We should only ever be appending survivors at the end of a pause
-  assert( hr->is_survivor(), "Logic");
+  assert(hr->is_survivor(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1829,7 +1829,7 @@
 // Add the region to the LHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   // Survivors should be added to the RHS at the end of a pause
-  assert(!hr->is_survivor(), "Logic");
+  assert(hr->is_eden(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1989,7 +1989,11 @@
   HeapRegion* hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
-    hr->set_young();
+    // There is a convention that all the young regions in the CSet
+    // are tagged as "eden", so we do this for the survivors here. We
+    // use the special set_eden_pre_gc() as it doesn't check that the
+    // region is free (which is not the case here).
+    hr->set_eden_pre_gc();
     hr = hr->get_next_young_region();
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -299,13 +299,13 @@
   // Accessors
 
   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
-    hr->set_young();
+    hr->set_eden();
     hr->install_surv_rate_group(_short_lived_surv_rate_group);
     hr->set_young_index_in_cset(young_index_in_cset);
   }
 
   void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
-    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
+    assert(hr->is_survivor(), "pre-condition");
     hr->install_surv_rate_group(_survivor_surv_rate_group);
     hr->set_young_index_in_cset(young_index_in_cset);
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -176,15 +176,17 @@
 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
-  OopsInHeapRegionClosure *_update_rset_cl;
   uint _worker_id;
 
+  DirtyCardQueue _dcq;
+  UpdateRSetDeferred _update_rset_cl;
+
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl,
                                 uint worker_id) :
-    _g1h(g1h), _update_rset_cl(update_rset_cl),
-    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
+    _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
+    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
+    }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -195,7 +197,7 @@
 
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
       if (hr->evacuation_failed()) {
-        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
+        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                             during_initial_mark,
                                             during_conc_mark,
                                             _worker_id);
@@ -214,7 +216,7 @@
         // whenever this might be required in the future.
         hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
-        _update_rset_cl->set_region(hr);
+        _update_rset_cl.set_region(hr);
         hr->object_iterate(&rspc);
 
         hr->rem_set()->clean_strong_code_roots(hr);
@@ -238,16 +240,7 @@
     _g1h(g1h) { }
 
   void work(uint worker_id) {
-    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
-    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
-    UpdateRSetDeferred deferred_update(_g1h, &dcq);
-
-    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
-    if (!G1DeferredRSUpdate) {
-      update_rset_cl = &immediate_update;
-    }
-
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -237,10 +237,8 @@
   _last_gc_worker_times_ms.verify();
   _last_gc_worker_other_times_ms.verify();
 
-  if (G1DeferredRSUpdate) {
-    _last_redirty_logged_cards_time_ms.verify();
-    _last_redirty_logged_cards_processed_cards.verify();
-  }
+  _last_redirty_logged_cards_time_ms.verify();
+  _last_redirty_logged_cards_processed_cards.verify();
 }
 
 void G1GCPhaseTimes::note_string_dedup_fixup_start() {
@@ -352,12 +350,10 @@
     _recorded_non_young_cset_choice_time_ms));
   print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
-  if (G1DeferredRSUpdate) {
-    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
-    if (G1Log::finest()) {
-      _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
-      _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
-    }
+  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  if (G1Log::finest()) {
+    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
+    _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
   }
   if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
     print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -27,7 +27,6 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
 #include "runtime/atomic.inline.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
@@ -136,7 +135,6 @@
 }
 
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
-  assert(!hr->isHumongous(), "Should have been cleared");
   _card_counts.clear_region(hr);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -84,20 +84,6 @@
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
-  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
-
-  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
  public:
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
@@ -124,8 +110,17 @@
     _refs->push(ref);
   }
 
-  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
-
+  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+    // If the new value of the field points to the same region or
+    // is the to-space, we don't need to include it in the Rset updates.
+    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
  private:
 
   inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
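
With the immediate-update path gone, update_rs() above always defers: it computes the card covering the written field, marks it at most once, and enqueues it on the per-thread dirty card queue (skipping writes that stay in the same region or target a survivor region). A self-contained sketch of that mark-once-then-enqueue idea, using a toy byte-per-card table rather than the real G1SATBCardTableModRefBS/DirtyCardQueue:

// Standalone sketch, not the HotSpot API: defer a remembered-set update by
// marking the covering card once and queuing it for later refinement.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static const size_t kCardShift = 9;            // 512-byte cards, as in HotSpot
static const size_t kNumCards  = 1u << 16;     // toy heap
static uint8_t card_table[kNumCards];          // 0 = clean, 1 = deferred
static std::vector<size_t> dirty_card_queue;   // cards awaiting refinement

static void update_rs(uintptr_t field_addr) {
  size_t card_index = (field_addr >> kCardShift) & (kNumCards - 1);
  if (card_table[card_index] == 0) {           // mark_card_deferred analogue
    card_table[card_index] = 1;
    dirty_card_queue.push_back(card_index);    // enqueue the card exactly once
  }
}

int main() {
  update_rs(0x1000);
  update_rs(0x1008);   // same 512-byte card: already marked, not enqueued again
  update_rs(0x2000);
  std::printf("queued cards: %zu\n", dirty_card_queue.size());   // prints 2
  return 0;
}
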
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -29,20 +29,6 @@
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
-}
-
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
   assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
          "Reference should not be NULL here as such are never pushed to the task queue.");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -339,12 +339,8 @@
   // are just discarded (there's no need to update the RSets of regions
   // that were in the collection set - after the pause these regions
   // are wholly 'free' of live objects. In the event of an evacuation
-  // failure the cards/buffers in this queue set are:
-  // * passed to the DirtyCardQueueSet that is used to manage deferred
-  //   RSet updates, or
-  // * scanned for references that point into the collection set
-  //   and the RSet of the corresponding region in the collection set
-  //   is updated immediately.
+  // failure the cards/buffers in this queue set are passed to the
+  // DirtyCardQueueSet that is used to manage RSet updates
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
@@ -358,7 +354,6 @@
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
   cleanupHRRS();
-  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
   _g1->set_refine_cte_cl_concurrency(false);
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
@@ -371,66 +366,6 @@
   _total_cards_scanned = 0;
 }
 
-
-// This closure, applied to a DirtyCardQueueSet, is used to immediately
-// update the RSets for the regions in the CSet. For each card it iterates
-// through the oops which coincide with that card. It scans the reference
-// fields in each oop; when it finds an oop that points into the collection
-// set, the RSet for the region containing the referenced object is updated.
-class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
-  G1CollectedHeap* _g1;
-  CardTableModRefBS* _ct_bs;
-public:
-  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
-                                          CardTableModRefBS* bs):
-    _g1(g1), _ct_bs(bs)
-  { }
-
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    // Construct the region representing the card.
-    HeapWord* start = _ct_bs->addr_for(card_ptr);
-    // And find the region containing it.
-    HeapRegion* r = _g1->heap_region_containing(start);
-
-    // Scan oops in the card looking for references into the collection set
-    // Don't use addr_for(card_ptr + 1) which can ask for
-    // a card beyond the heap.  This is not safe without a perm
-    // gen.
-    HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
-    MemRegion scanRegion(start, end);
-
-    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
-    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
-    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
-
-    // We can pass false as the "filter_young" parameter here as:
-    // * we should be in a STW pause,
-    // * the DCQS to which this closure is applied is used to hold
-    //   references that point into the collection set from the prior
-    //   RSet updating,
-    // * the post-write barrier shouldn't be logging updates to young
-    //   regions (but there is a situation where this can happen - see
-    //   the comment in G1RemSet::refine_card() below -
-    //   that should not be applicable here), and
-    // * during actual RSet updating, the filtering of cards in young
-    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
-    //   employed.
-    // As a result, when this closure is applied to "refs into cset"
-    // DCQS, we shouldn't see any cards in young regions.
-    update_rs_cl.set_region(r);
-    HeapWord* stop_point =
-      r->oops_on_card_seq_iterate_careful(scanRegion,
-                                          &filter_then_update_rs_cset_oop_cl,
-                                          false /* filter_young */,
-                                          NULL  /* card_ptr */);
-
-    // Since this is performed in the event of an evacuation failure, we
-    // we shouldn't see a non-null stop point
-    assert(stop_point == NULL, "saw an unallocated region");
-    return true;
-  }
-};
-
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
@@ -451,25 +386,10 @@
     double restore_remembered_set_start = os::elapsedTime();
 
     // Restore remembered sets for the regions pointing into the collection set.
-    if (G1DeferredRSUpdate) {
-      // If deferred RS updates are enabled then we just need to transfer
-      // the completed buffers from (a) the DirtyCardQueueSet used to hold
-      // cards that contain references that point into the collection set
-      // to (b) the DCQS used to hold the deferred RS updates
-      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
-    } else {
-
-      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
-      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
-
-      int n_completed_buffers = 0;
-      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
-                                                    0, 0, true)) {
-        n_completed_buffers++;
-      }
-      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
-    }
-
+    // We just need to transfer the completed buffers from the DirtyCardQueueSet
+    // used to hold cards that contain references that point into the collection set
+    // to the DCQS used to hold the deferred RS updates.
+    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
     _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -193,18 +193,4 @@
   bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
-class UpdateRSetImmediate: public OopsInHeapRegionClosure {
-private:
-  G1RemSet* _g1_rem_set;
-
-  template <class T> void do_oop_work(T* p);
-public:
-  UpdateRSetImmediate(G1RemSet* rs) :
-    _g1_rem_set(rs) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-};
-
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -79,13 +79,4 @@
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T>
-inline void UpdateRSetImmediate::do_oop_work(T* p) {
-  assert(_from->is_in_reserved(p), "paranoia");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
-    _g1_rem_set->par_write_ref(_from, p, 0);
-  }
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -259,14 +259,16 @@
     size_t code_root_elems = hrrs->strong_code_roots_list_length();
 
     RegionTypeCounter* current = NULL;
-    if (r->is_young()) {
+    if (r->is_free()) {
+      current = &_free;
+    } else if (r->is_young()) {
       current = &_young;
     } else if (r->isHumongous()) {
       current = &_humonguous;
-    } else if (r->is_empty()) {
-      current = &_free;
+    } else if (r->is_old()) {
+      current = &_old;
     } else {
-      current = &_old;
+      ShouldNotReachHere();
     }
     current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
     _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -31,8 +31,6 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-
 class DirtyCardQueueSet;
 class G1SATBCardTableLoggingModRefBS;
 
@@ -180,7 +178,4 @@
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
 };
 
-
-#endif // INCLUDE_ALL_GCS
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -108,9 +108,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
                                                                             \
-  develop(bool, G1DeferredRSUpdate, true,                                   \
-          "If true, use deferred RS updates")                               \
-                                                                            \
   develop(bool, G1RSLogCheckCardTable, false,                               \
           "If true, verify that no dirty cards remain after RS log "        \
           "processing.")                                                    \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -211,8 +211,6 @@
 }
 
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
-  assert(_humongous_type == NotHumongous,
-         "we should have already filtered out humongous regions");
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
   assert(_end == _orig_end,
@@ -222,7 +220,7 @@
 
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
-  set_young_type(NotYoung);
+  set_free();
   reset_pre_dummy_top();
 
   if (!par) {
@@ -273,7 +271,7 @@
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
-  _humongous_type = StartsHumongous;
+  _type.set_starts_humongous();
   _humongous_start_region = this;
 
   set_end(new_end);
@@ -287,11 +285,11 @@
   assert(top() == bottom(), "should be empty");
   assert(first_hr->startsHumongous(), "pre-condition");
 
-  _humongous_type = ContinuesHumongous;
+  _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }
 
-void HeapRegion::set_notHumongous() {
+void HeapRegion::clear_humongous() {
   assert(isHumongous(), "pre-condition");
 
   if (startsHumongous()) {
@@ -307,7 +305,6 @@
   }
 
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
-  _humongous_type = NotHumongous;
   _humongous_start_region = NULL;
 }
 
@@ -327,12 +324,12 @@
                        MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
     _hrm_index(hrm_index),
-    _humongous_type(NotHumongous), _humongous_start_region(NULL),
+    _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
-    _young_type(NotYoung), _next_young_region(NULL),
+    _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
@@ -686,26 +683,11 @@
 
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
-  if (isHumongous()) {
-    if (startsHumongous())
-      st->print(" HS");
-    else
-      st->print(" HC");
-  } else {
-    st->print("   ");
-  }
+  st->print(" %2s", get_short_type_str());
   if (in_collection_set())
     st->print(" CS");
   else
     st->print("   ");
-  if (is_young())
-    st->print(is_survivor() ? " SU" : " Y ");
-  else
-    st->print("   ");
-  if (is_empty())
-    st->print(" F");
-  else
-    st->print("  ");
   st->print(" TS %5d", _gc_time_stamp);
   st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
             prev_top_at_mark_start(), next_top_at_mark_start());
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
+#include "gc_implementation/g1/heapRegionType.hpp"
 #include "gc_implementation/g1/survRateGroup.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
@@ -34,8 +35,6 @@
 #include "memory/watermark.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-
 // A HeapRegion is the smallest piece of a G1CollectedHeap that
 // can be collected independently.
 
@@ -55,10 +54,7 @@
 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \
                 (_hr_)->hrm_index(), \
-                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
-                (_hr_)->startsHumongous() ? "HS" : \
-                (_hr_)->continuesHumongous() ? "HC" : \
-                !(_hr_)->is_empty() ? "O" : "F", \
+                (_hr_)->get_short_type_str(), \
                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
 
 // sentinel value for hrm_index
@@ -215,12 +211,6 @@
   friend class VMStructs;
  private:
 
-  enum HumongousType {
-    NotHumongous = 0,
-    StartsHumongous,
-    ContinuesHumongous
-  };
-
   // The remembered set for this region.
   // (Might want to make this "inline" later, to avoid some alloc failure
   // issues.)
@@ -232,7 +222,8 @@
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
 
-  HumongousType _humongous_type;
+  HeapRegionType _type;
+
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
   // For the start region of a humongous sequence, it's original end().
@@ -274,13 +265,6 @@
   // The calculated GC efficiency of the region.
   double _gc_efficiency;
 
-  enum YoungType {
-    NotYoung,                   // a region is not young
-    Young,                      // a region is young
-    Survivor                    // a region is young and it contains survivors
-  };
-
-  volatile YoungType _young_type;
   int  _young_index_in_cset;
   SurvRateGroup* _surv_rate_group;
   int  _age_index;
@@ -305,12 +289,6 @@
     _next_top_at_mark_start = bot;
   }
 
-  void set_young_type(YoungType new_type) {
-    //assert(_young_type != new_type, "setting the same type" );
-    // TODO: add more assertions here
-    _young_type = new_type;
-  }
-
   // Cached attributes used in the collection set policy information
 
   // The RSet length that was added to the total value
@@ -430,9 +408,21 @@
     _prev_marked_bytes = _next_marked_bytes = 0;
   }
 
-  bool isHumongous() const { return _humongous_type != NotHumongous; }
-  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
-  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
+  const char* get_type_str() const { return _type.get_str(); }
+  const char* get_short_type_str() const { return _type.get_short_str(); }
+
+  bool is_free() const { return _type.is_free(); }
+
+  bool is_young()    const { return _type.is_young();    }
+  bool is_eden()     const { return _type.is_eden();     }
+  bool is_survivor() const { return _type.is_survivor(); }
+
+  bool isHumongous() const { return _type.is_humongous(); }
+  bool startsHumongous() const { return _type.is_starts_humongous(); }
+  bool continuesHumongous() const { return _type.is_continues_humongous();   }
+
+  bool is_old() const { return _type.is_old(); }
+
   // For a humongous region, region in which it starts.
   HeapRegion* humongous_start_region() const {
     return _humongous_start_region;
@@ -496,7 +486,7 @@
   void set_continuesHumongous(HeapRegion* first_hr);
 
   // Unsets the humongous-related fields on the region.
-  void set_notHumongous();
+  void clear_humongous();
 
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
@@ -623,9 +613,6 @@
   void calc_gc_efficiency(void);
   double gc_efficiency() { return _gc_efficiency;}
 
-  bool is_young() const     { return _young_type != NotYoung; }
-  bool is_survivor() const  { return _young_type == Survivor; }
-
   int  young_index_in_cset() const { return _young_index_in_cset; }
   void set_young_index_in_cset(int index) {
     assert( (index == -1) || is_young(), "pre-condition" );
@@ -677,11 +664,13 @@
     }
   }
 
-  void set_young() { set_young_type(Young); }
+  void set_free() { _type.set_free(); }
 
-  void set_survivor() { set_young_type(Survivor); }
+  void set_eden()        { _type.set_eden();        }
+  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
+  void set_survivor()    { _type.set_survivor();    }
 
-  void set_not_young() { set_young_type(NotYoung); }
+  void set_old() { _type.set_old(); }
 
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
@@ -809,6 +798,4 @@
   bool complete() { return _complete; }
 };
 
-#endif // INCLUDE_ALL_GCS
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -42,7 +42,9 @@
   assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
   assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
   assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
-  assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrm_index(), name()));
+  assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
+  assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
+  assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
   assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
 }
 #endif
@@ -85,16 +87,16 @@
   out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
   out->print_cr("  Region Assumptions");
   out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
-  out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
+  out->print_cr("    free              : %s", BOOL_TO_STR(regions_free()));
   out->print_cr("  Attributes");
   out->print_cr("    length            : %14u", length());
   out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
                 total_capacity_bytes());
 }
 
-HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker)
+HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
   : _name(name), _verify_in_progress(false),
-    _is_humongous(humongous), _is_empty(empty), _mt_safety_checker(mt_safety_checker),
+    _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker),
     _count()
 { }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -81,7 +81,7 @@
   friend class VMStructs;
 private:
   bool _is_humongous;
-  bool _is_empty;
+  bool _is_free;
   HRSMtSafeChecker* _mt_safety_checker;
 
 protected:
@@ -102,9 +102,9 @@
   // not. Only used during verification.
   bool regions_humongous() { return _is_humongous; }
 
-  // Indicates whether all regions in the set should be empty or
+  // Indicates whether all regions in the set should be free or
   // not. Only used during verification.
-  bool regions_empty() { return _is_empty; }
+  bool regions_free() { return _is_free; }
 
   void check_mt_safety() {
     if (_mt_safety_checker != NULL) {
@@ -114,7 +114,7 @@
 
   virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
 
-  HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker);
+  HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker);
 
 public:
   const char* name() { return _name; }
@@ -171,7 +171,7 @@
   do {                                                                        \
     assert(((_set1_)->regions_humongous() ==                                  \
                                             (_set2_)->regions_humongous()) && \
-           ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
+           ((_set1_)->regions_free() == (_set2_)->regions_free()),            \
            hrs_err_msg("the contents of set %s and set %s should match",      \
                        (_set1_)->name(), (_set2_)->name()));                  \
   } while (0)
@@ -184,7 +184,7 @@
 class HeapRegionSet : public HeapRegionSetBase {
 public:
   HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker):
-    HeapRegionSetBase(name, humongous, false /* empty */, mt_safety_checker) { }
+    HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { }
 
   void bulk_remove(const HeapRegionSetCount& removed) {
     _count.decrement(removed.length(), removed.capacity());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionType.hpp"
+
+bool HeapRegionType::is_valid(Tag tag) {
+  switch (tag) {
+    case FreeTag:
+    case EdenTag:
+    case SurvTag:
+    case HumStartsTag:
+    case HumContTag:
+    case OldTag:
+      return true;
+  }
+  return false;
+}
+
+const char* HeapRegionType::get_str() const {
+  hrt_assert_is_valid(_tag);
+  switch (_tag) {
+    case FreeTag:      return "FREE";
+    case EdenTag:      return "EDEN";
+    case SurvTag:      return "SURV";
+    case HumStartsTag: return "HUMS";
+    case HumContTag:   return "HUMC";
+    case OldTag:       return "OLD";
+  }
+  ShouldNotReachHere();
+  // keep some compilers happy
+  return NULL;
+}
+
+const char* HeapRegionType::get_short_str() const {
+  hrt_assert_is_valid(_tag);
+  switch (_tag) {
+    case FreeTag:      return "F";
+    case EdenTag:      return "E";
+    case SurvTag:      return "S";
+    case HumStartsTag: return "HS";
+    case HumContTag:   return "HC";
+    case OldTag:       return "O";
+  }
+  ShouldNotReachHere();
+  // keep some compilers happy
+  return NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
+
+#include "memory/allocation.hpp"
+
+#define hrt_assert_is_valid(tag) \
+  assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag)))
+
+class HeapRegionType VALUE_OBJ_CLASS_SPEC {
+private:
+  // We encode the value of the heap region type so the generation can be
+  // determined quickly. The tag is split into two parts:
+  //
+  //   major type (young, humongous)                         : top N-1 bits
+  //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
+  //
+  // If there's need to increase the number of minor types in the
+  // future, we'll have to increase the size of the latter and hence
+  // decrease the size of the former.
+  //
+  // 0000 0 [ 0] Free
+  //
+  // 0001 0      Young Mask
+  // 0001 0 [ 2] Eden
+  // 0001 1 [ 3] Survivor
+  //
+  // 0010 0      Humongous Mask
+  // 0010 0 [ 4] Humongous Starts
+  // 0010 1 [ 5] Humongous Continues
+  //
+  // 01000 [ 8] Old
+  typedef enum {
+    FreeTag       = 0,
+
+    YoungMask     = 2,
+    EdenTag       = YoungMask,
+    SurvTag       = YoungMask + 1,
+
+    HumMask       = 4,
+    HumStartsTag  = HumMask,
+    HumContTag    = HumMask + 1,
+
+    OldTag        = 8
+  } Tag;
+
+  volatile Tag _tag;
+
+  static bool is_valid(Tag tag);
+
+  Tag get() const {
+    hrt_assert_is_valid(_tag);
+    return _tag;
+  }
+
+  // Sets the type to 'tag'.
+  void set(Tag tag) {
+    hrt_assert_is_valid(tag);
+    hrt_assert_is_valid(_tag);
+    _tag = tag;
+  }
+
+  // Sets the type to 'tag', expecting the current type to be 'before'.
+  // This makes it possible to sanity check the type transition.
+  void set_from(Tag tag, Tag before) {
+    hrt_assert_is_valid(tag);
+    hrt_assert_is_valid(before);
+    hrt_assert_is_valid(_tag);
+    assert(_tag == before,
+           err_msg("HR tag: %u, expected: %u new tag; %u", _tag, before, tag));
+    _tag = tag;
+  }
+
+public:
+  // Queries
+
+  bool is_free() const { return get() == FreeTag; }
+
+  bool is_young()    const { return (get() & YoungMask) != 0; }
+  bool is_eden()     const { return get() == EdenTag;  }
+  bool is_survivor() const { return get() == SurvTag;  }
+
+  bool is_humongous()           const { return (get() & HumMask) != 0; }
+  bool is_starts_humongous()    const { return get() == HumStartsTag;  }
+  bool is_continues_humongous() const { return get() == HumContTag;    }
+
+  bool is_old() const { return get() == OldTag; }
+
+  // Setters
+
+  void set_free() { set(FreeTag); }
+
+  void set_eden()        { set_from(EdenTag, FreeTag); }
+  void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
+  void set_survivor()    { set_from(SurvTag, FreeTag); }
+
+  void set_starts_humongous()    { set_from(HumStartsTag, FreeTag); }
+  void set_continues_humongous() { set_from(HumContTag,   FreeTag); }
+
+  void set_old() { set(OldTag); }
+
+  // Misc
+
+  const char* get_str() const;
+  const char* get_short_str() const;
+
+  HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -66,9 +66,10 @@
 
 void GenerationSizer::initialize_size_info() {
   trace_gen_sizes("ps heap raw");
-  const size_t page_sz = os::page_size_for_region(_min_heap_byte_size,
-                                                  _max_heap_byte_size,
-                                                  8);
+  const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
+  const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
+  const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
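+  // Use the smaller of the two so the chosen page size suits both the
+  // minimum and the maximum heap size.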
+  const size_t page_sz = MIN2(max_page_sz, min_page_sz);
 
   // Can a page size be something else than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -41,7 +41,7 @@
 
   const size_t words = bits / BitsPerWord;
   const size_t raw_bytes = words * sizeof(idx_t);
-  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -403,7 +403,7 @@
 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 {
   const size_t raw_bytes = count * element_size;
-  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
--- a/hotspot/src/share/vm/memory/heap.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -98,9 +98,13 @@
   _log2_segment_size = exact_log2(segment_size);
 
   // Reserve and initialize space for _memory.
-  const size_t page_size = os::can_execute_large_page_memory() ?
-          os::page_size_for_region(committed_size, reserved_size, 8) :
-          os::vm_page_size();
+  size_t page_size = os::vm_page_size();
+  if (os::can_execute_large_page_memory()) {
+    const size_t min_pages = 8;
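+    // Take the smaller of the page sizes suggested for the committed and
+    // the reserved size, so the choice fits both.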
+    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
+                     os::page_size_for_region(reserved_size, min_pages));
+  }
+
   const size_t granularity = os::vm_allocation_granularity();
   const size_t r_align = MAX2(page_size, granularity);
   const size_t r_size = align_size_up(reserved_size, r_align);
--- a/hotspot/src/share/vm/prims/jni.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -3848,6 +3848,7 @@
   unit_test_function_call
 
 // Forward declaration
+void TestOS_test();
 void TestReservedSpace_test();
 void TestReserveMemorySpecial_test();
 void TestVirtualSpace_test();
@@ -3871,6 +3872,7 @@
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(TestOS_test());
     run_unit_test(TestReservedSpace_test());
     run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(TestVirtualSpace_test());
--- a/hotspot/src/share/vm/runtime/os.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -1406,24 +1406,15 @@
   return (sp > (stack_limit + reserved_area));
 }
 
-size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
-                                uint min_pages)
-{
+size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
   assert(min_pages > 0, "sanity");
   if (UseLargePages) {
-    const size_t max_page_size = region_max_size / min_pages;
+    const size_t max_page_size = region_size / min_pages;
 
-    for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
-      const size_t sz = _page_sizes[i];
-      const size_t mask = sz - 1;
-      if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
-        // The largest page size with no fragmentation.
-        return sz;
-      }
-
-      if (sz <= max_page_size) {
-        // The largest page size that satisfies the min_pages requirement.
-        return sz;
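+    // Pick the largest supported page size that both satisfies the
+    // min_pages requirement and evenly divides the region (_page_sizes is
+    // sorted in descending order). For example, with hypothetical supported
+    // sizes {256M, 2M}, a 10M region with min_pages == 1 gets 2M pages:
+    // 256M exceeds the region, and 10M is 2M-aligned.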
+    for (size_t i = 0; _page_sizes[i] != 0; ++i) {
+      const size_t page_size = _page_sizes[i];
+      if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
+        return page_size;
       }
     }
   }
@@ -1660,3 +1651,63 @@
   return result;
 }
 #endif
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define assert_eq(a,b) assert(a == b, err_msg(SIZE_FORMAT " != " SIZE_FORMAT, a, b))
+
+class TestOS : AllStatic {
+  static size_t small_page_size() {
+    return os::vm_page_size();
+  }
+
+  static size_t large_page_size() {
+    const size_t large_page_size_example = 4 * M;
+    return os::page_size_for_region(large_page_size_example, 1);
+  }
+
+  static void test_page_size_for_region() {
+    if (UseLargePages) {
+      const size_t small_page = small_page_size();
+      const size_t large_page = large_page_size();
+
+      if (large_page > small_page) {
+        size_t num_small_pages_in_large = large_page / small_page;
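+        // Requiring num_small_pages_in_large pages caps the page size at
+        // large_page / num_small_pages_in_large == small_page.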
+        size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
+
+        assert_eq(page, small_page);
+      }
+    }
+  }
+
+  static void test_page_size_for_region_alignment() {
+    if (UseLargePages) {
+      const size_t small_page = small_page_size();
+      const size_t large_page = large_page_size();
+      if (large_page > small_page) {
+        const size_t unaligned_region = large_page + 17;
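+        // Not a multiple of any large page size, so the small page size is
+        // expected as the fall-back.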
+        size_t page = os::page_size_for_region(unaligned_region, 1);
+        assert_eq(page, small_page);
+
+        const size_t num_pages = 5;
+        const size_t aligned_region = large_page * num_pages;
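+        // An exact multiple of large_page that yields exactly num_pages
+        // large pages, so the large page size is expected.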
+        page = os::page_size_for_region(aligned_region, num_pages);
+        assert_eq(page, large_page);
+      }
+    }
+  }
+
+ public:
+  static void run_tests() {
+    test_page_size_for_region();
+    test_page_size_for_region_alignment();
+  }
+};
+
+void TestOS_test() {
+  TestOS::run_tests();
+}
+
+#endif // PRODUCT
--- a/hotspot/src/share/vm/runtime/os.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -266,19 +266,11 @@
   // Return the default page size.
   static int    vm_page_size();
 
-  // Return the page size to use for a region of memory.  The min_pages argument
-  // is a hint intended to limit fragmentation; it says the returned page size
-  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
-  // this routine may return a size larger than region_max_size / min_pages.
-  //
-  // The current implementation ignores min_pages if a larger page size is an
-  // exact multiple of both region_min_size and region_max_size.  This allows
-  // larger pages to be used when doing so would not cause fragmentation; in
-  // particular, a single page can be used when region_min_size ==
-  // region_max_size == a supported page size.
-  static size_t page_size_for_region(size_t region_min_size,
-                                     size_t region_max_size,
-                                     uint min_pages);
+  // Returns the page size to use for a region of memory.
+  // region_size / min_pages will always be greater than or equal to the
+  // returned value.
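+  // For example, a region of 8 * M with min_pages == 4 can be backed by
+  // pages of at most 2 * M.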
+  static size_t page_size_for_region(size_t region_size, size_t min_pages);
+
   // Return the largest page size that can be used
   static size_t max_page_size() {
     // The _page_sizes array is sorted in descending order.
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Sep 18 18:19:44 2014 +0200
@@ -38,7 +38,7 @@
 }
 
 ReservedSpace::ReservedSpace(size_t size) {
-  size_t page_size = os::page_size_for_region(size, size, 1);
+  size_t page_size = os::page_size_for_region(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   // Don't force the alignment to be large page aligned,
   // since that will waste memory.
@@ -357,7 +357,7 @@
 
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
-  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
+  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 }
 
@@ -992,7 +992,7 @@
     case Disable:
       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
     case Commit:
-      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
+      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
     }
   }
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -66,6 +66,9 @@
 #ifndef ATTRIBUTE_PRINTF
 #define ATTRIBUTE_PRINTF(fmt, vargs)
 #endif
+#ifndef ATTRIBUTE_SCANF
+#define ATTRIBUTE_SCANF(fmt, vargs)
+#endif
 
 
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -271,15 +271,16 @@
 #define PRAGMA_IMPLEMENTATION        #pragma implementation
 #define VALUE_OBJ_CLASS_SPEC
 
-#ifndef ATTRIBUTE_PRINTF
 // Diagnostic pragmas like the ones defined below in PRAGMA_FORMAT_NONLITERAL_IGNORED
 // were only introduced in GCC 4.2. Because we have no other possibility to ignore
 // these warnings for older versions of GCC, we simply don't decorate our printf-style
 // functions with __attribute__(format) in that case.
 #if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ > 4)
+#ifndef ATTRIBUTE_PRINTF
 #define ATTRIBUTE_PRINTF(fmt,vargs)  __attribute__((format(printf, fmt, vargs)))
-#else
-#define ATTRIBUTE_PRINTF(fmt,vargs)
+#endif
+#ifndef ATTRIBUTE_SCANF
+#define ATTRIBUTE_SCANF(fmt,vargs)  __attribute__((format(scanf, fmt, vargs)))
 #endif
 #endif
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Sep 18 18:19:44 2014 +0200
@@ -265,14 +265,6 @@
 
 inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
 
-
-// Misc
-// NOTE: This one leads to an infinite recursion on Linux
-#ifndef LINUX
-int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr);
-#define vsnprintf local_vsnprintf
-#endif
-
 // Portability macros
 #define PRAGMA_INTERFACE
 #define PRAGMA_IMPLEMENTATION
--- a/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java	Thu Sep 18 18:19:44 2014 +0200
@@ -22,7 +22,6 @@
 */
 
 /*
- * @ignore 8049864
  * @test TestParallelHeapSizeFlags
  * @key gc
  * @bug 8006088
--- a/hotspot/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java	Tue Sep 16 12:13:13 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
-
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
-    }
-
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
-
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations  = 2 * youngGenSize / approxAllocSize;
-
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
-    }
-  }
-}
--- a/hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Thu Sep 18 18:19:44 2014 +0200
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run driver TestCMSClassUnloadingEnabledHWM
@@ -35,9 +35,11 @@
 
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
-
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;
 
 public class TestCMSClassUnloadingEnabledHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -48,15 +50,18 @@
       "-Xbootclasspath/a:.",
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
-      "" + MetaspaceSize,
-      "" + YoungGenSize);
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
   }
 
@@ -88,5 +93,37 @@
     testWithCMSClassUnloading();
     testWithoutCMSClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
 }
 
--- a/hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Thu Sep 18 18:19:44 2014 +0200
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run driver TestG1ClassUnloadingHWM
@@ -35,9 +35,9 @@
 
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
-
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;
 
 public class TestG1ClassUnloadingHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -54,7 +54,7 @@
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
     return new OutputAnalyzer(pb.start());
@@ -88,5 +88,36 @@
     testWithG1ClassUnloading();
     testWithoutG1ClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations  = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
 }
 
--- a/hotspot/test/gc/g1/TestDeferredRSUpdate.java	Tue Sep 16 12:13:13 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestDeferredRSUpdate
- * @bug 8040977 8052170
- * @summary Ensure that running with -XX:-G1DeferredRSUpdate does not crash the VM
- * @key gc
- * @library /testlibrary
- */
-
-import com.oracle.java.testlibrary.ProcessTools;
-import com.oracle.java.testlibrary.OutputAnalyzer;
-
-public class TestDeferredRSUpdate {
-  public static void main(String[] args) throws Exception {
-    GCTest.main(args);
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
-                                                              "-Xmx10M",
-                                                              "-XX:+PrintGCDetails",
-                                                              // G1DeferredRSUpdate is a develop option, but we cannot limit execution of this test to only debug VMs.
-                                                              "-XX:+IgnoreUnrecognizedVMOptions",
-                                                              "-XX:-G1DeferredRSUpdate",
-                                                              GCTest.class.getName());
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldHaveExitValue(0);
-  }
-
-  static class GCTest {
-    private static Object[] garbage = new Object[32];
-
-    public static void main(String [] args) {
-      System.out.println("Creating garbage");
-      // Create 128MB of garbage. This should result in at least one minor GC, with
-      // some objects copied to old gen. As references from old to young are installed,
-      // the crash due to the use before initialize occurs.
-      Object prev = null;
-      Object prevPrev = null;
-      for (int i = 0; i < 1024; i++) {
-        Object[] next = new Object[32 * 1024];
-        next[0] = prev;
-        next[1] = prevPrev;
-
-        Object[] cur = (Object[]) garbage[i % garbage.length];
-        if (cur != null) {
-          cur[0] = null;
-          cur[1] = null;
-        }
-        garbage[i % garbage.length] = next;
-
-        prevPrev = prev;
-        prev = next;
-      }
-      System.out.println("Done");
-    }
-  }
-}
--- a/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Thu Sep 18 18:19:44 2014 +0200
@@ -26,7 +26,7 @@
  * @bug 8036025 8056043
  * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
  * @library /testlibrary
- * @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc TestHumongousShrinkHeap
+ * @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=12 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc TestHumongousShrinkHeap
  */
 
 import java.lang.management.ManagementFactory;
@@ -41,12 +41,24 @@
     public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
     public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";
 
-    private static final ArrayList<ArrayList<byte[]>> garbage = new ArrayList<>();
-    private static final int PAGE_SIZE = 1024 * 1024; // 1M
-    private static final int PAGES_NUM = 5;
+    private static final List<List<byte[]>> garbage = new ArrayList<>();
+    private static final int REGION_SIZE = 1024 * 1024; // 1M
+    private static final int LISTS_COUNT = 10;
+    private static final int HUMON_SIZE = Math.round(.9f * REGION_SIZE);
+    private static final long AVAILABLE_MEMORY =
+            Runtime.getRuntime().freeMemory();
+    private static final int HUMON_COUNT =
+            (int) ((AVAILABLE_MEMORY / HUMON_SIZE) / LISTS_COUNT);
 
 
     public static void main(String[] args) {
+        System.out.format("Running with %s max heap size. "
+                + "Will allocate humongous object of %s size %d times.%n",
+                MemoryUsagePrinter.humanReadableByteCount(AVAILABLE_MEMORY, false),
+                MemoryUsagePrinter.humanReadableByteCount(HUMON_SIZE, false),
+                HUMON_COUNT
+        );
         new TestHumongousShrinkHeap().test();
     }
 
@@ -54,8 +66,8 @@
         System.gc();
         MemoryUsagePrinter.printMemoryUsage("init");
 
-        eat();
-        MemoryUsagePrinter.printMemoryUsage("eaten");
+        allocate();
+        MemoryUsagePrinter.printMemoryUsage("allocated");
         MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
 
         free();
@@ -72,15 +84,12 @@
         ));
     }
 
-    private void eat() {
-        int HumongousObjectSize = Math.round(.9f * PAGE_SIZE);
-        System.out.println("Will allocate objects of size=" +
-                MemoryUsagePrinter.humanReadableByteCount(HumongousObjectSize, true));
+    private void allocate() {
 
-        for (int i = 0; i < PAGES_NUM; i++) {
-            ArrayList<byte[]> stuff = new ArrayList<>();
-            eatList(stuff, 100, HumongousObjectSize);
-            MemoryUsagePrinter.printMemoryUsage("eat #" + i);
+        for (int i = 0; i < LISTS_COUNT; i++) {
+            List<byte[]> stuff = new ArrayList<>();
+            allocateList(stuff, HUMON_COUNT, HUMON_SIZE);
+            MemoryUsagePrinter.printMemoryUsage("allocate #" + (i+1));
             garbage.add(stuff);
         }
     }
@@ -90,12 +99,12 @@
         garbage.subList(0, garbage.size() - 1).clear();
 
         // do not free last one element from last list
-        ArrayList stuff = garbage.get(garbage.size() - 1);
+        List stuff = garbage.get(garbage.size() - 1);
         stuff.subList(0, stuff.size() - 1).clear();
         System.gc();
     }
 
-    private static void eatList(List garbage, int count, int size) {
+    private static void allocateList(List garbage, int count, int size) {
         for (int i = 0; i < count; i++) {
             garbage.add(new byte[size]);
         }
@@ -122,9 +131,9 @@
         float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
         System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
                 label,
-                humanReadableByteCount(memusage.getInit(), true),
-                humanReadableByteCount(memusage.getUsed(), true),
-                humanReadableByteCount(memusage.getCommitted(), true),
+                humanReadableByteCount(memusage.getInit(), false),
+                humanReadableByteCount(memusage.getUsed(), false),
+                humanReadableByteCount(memusage.getCommitted(), false),
                 freeratio * 100
         );
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestShrinkDefragmentedHeap.java	Thu Sep 18 18:19:44 2014 +0200
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkDefragmentedHeap
+ * @bug 8038423
+ * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
+ *     1. allocate small objects mixed with humongous ones
+ *        "ssssHssssHssssHssssHssssHssssHssssH"
+ *     2. release all allocated objects except the last humongous one
+ *        "..................................H"
+ *     3. invoke GC and check that memory is returned to the system (the amount of committed memory goes down)
+ *
+ * @library /testlibrary
+ */
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.util.ArrayList;
+import java.util.List;
+import sun.management.ManagementFactoryHelper;
+import static com.oracle.java.testlibrary.Asserts.*;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestShrinkDefragmentedHeap {
+    // Since we keep all the small objects alive, they are promoted, and old regions end up allocated at the
+    // bottom of the heap together with the humongous regions. If too many old regions occupy the lower part
+    // of the heap, the humongous regions are allocated in the upper part of the heap anyway.
+    // To avoid this, the Eden needs to be big enough to fit all the small objects.
+    private static final int INITIAL_HEAP_SIZE  = 200 * 1024 * 1024;
+    private static final int MINIMAL_YOUNG_SIZE = 190 * 1024 * 1024;
+    private static final int REGION_SIZE        = 1 * 1024 * 1024;
+
+    public static void main(String[] args) throws Throwable {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:InitialHeapSize=" + INITIAL_HEAP_SIZE,
+                "-Xmn" + MINIMAL_YOUNG_SIZE,
+                "-XX:MinHeapFreeRatio=10",
+                "-XX:MaxHeapFreeRatio=11",
+                "-XX:+UseG1GC",
+                "-XX:G1HeapRegionSize=" + REGION_SIZE,
+                "-verbose:gc",
+                GCTest.class.getName()
+        );
+
+        OutputAnalyzer output = ProcessTools.executeProcess(pb);
+        output.shouldHaveExitValue(0);
+    }
+
+    static class GCTest {
+
+        private static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
+        private static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";
+        private static final String NEW_SIZE_FLAG_NAME = "NewSize";
+
+        private static final ArrayList<ArrayList<byte[]>> garbage = new ArrayList<>();
+
+        private static final int SMALL_OBJS_SIZE  = 10 * 1024; // 10kB
+        private static final int SMALL_OBJS_COUNT = MINIMAL_YOUNG_SIZE / (SMALL_OBJS_SIZE-1);
+        private static final int ALLOCATE_COUNT = 3;
+        // Try to fit all the humongous objects into the gap between the minimal young size and the
+        // initial heap size, to avoid implicit GCs.
+        private static final int HUMONG_OBJS_SIZE = (int) Math.max(
+                (INITIAL_HEAP_SIZE - MINIMAL_YOUNG_SIZE) / ALLOCATE_COUNT / 4,
+                REGION_SIZE * 1.1
+        );
+
+        private static final long initialHeapSize = getHeapMemoryUsage().getUsed();
+
+        public static void main(String[] args) throws InterruptedException {
+            new GCTest().test();
+        }
+
+        private void test() throws InterruptedException {
+            MemoryUsagePrinter.printMemoryUsage("init");
+
+            allocate();
+            System.gc();
+            MemoryUsage muFull = getHeapMemoryUsage();
+            MemoryUsagePrinter.printMemoryUsage("allocated");
+
+            free();
+            //Thread.sleep(1000); // sleep before measuring, due to lags in JMX
+            MemoryUsage muFree = getHeapMemoryUsage();
+            MemoryUsagePrinter.printMemoryUsage("free");
+
+            assertLessThan(muFree.getCommitted(), muFull.getCommitted(), prepareMessageCommittedIsNotLess() );
+        }
+
+        private void allocate() {
+            System.out.format("Will allocate objects of small size = %s and humongous size = %s",
+                    MemoryUsagePrinter.humanReadableByteCount(SMALL_OBJS_SIZE, false),
+                    MemoryUsagePrinter.humanReadableByteCount(HUMONG_OBJS_SIZE, false)
+            );
+
+            for (int i = 0; i < ALLOCATE_COUNT; i++) {
+                ArrayList<byte[]> stuff = new ArrayList<>();
+                allocateList(stuff, SMALL_OBJS_COUNT / ALLOCATE_COUNT, SMALL_OBJS_SIZE);
+                garbage.add(stuff);
+
+                ArrayList<byte[]> humongousStuff = new ArrayList<>();
+                allocateList(humongousStuff, 4, HUMONG_OBJS_SIZE);
+                garbage.add(humongousStuff);
+            }
+        }
+
+        private void free() {
+            // do not free the last list
+            garbage.subList(0, garbage.size() - 1).clear();
+
+            // do not free the last element of the last list
+            ArrayList stuff = garbage.get(garbage.size() - 1);
+            if (stuff.size() > 1) {
+                stuff.subList(0, stuff.size() - 1).clear();
+            }
+            System.gc();
+        }
+
+        private String prepareMessageCommittedIsNotLess() {
+            return String.format(
+                    "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n"
+                    + "%s = %s%n%s = %s",
+                    MIN_FREE_RATIO_FLAG_NAME,
+                    ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(),
+                    MAX_FREE_RATIO_FLAG_NAME,
+                    ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue()
+            );
+        }
+
+        private static void allocateList(List garbage, int count, int size) {
+            for (int i = 0; i < count; i++) {
+                garbage.add(new byte[size]);
+            }
+        }
+    }
+
+    static MemoryUsage getHeapMemoryUsage() {
+        return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+    }
+
+    /**
+     * Prints memory usage to standard output
+     */
+    static class MemoryUsagePrinter {
+
+        public static String humanReadableByteCount(long bytes, boolean si) {
+            int unit = si ? 1000 : 1024;
+            if (bytes < unit) {
+                return bytes + " B";
+            }
+            int exp = (int) (Math.log(bytes) / Math.log(unit));
+            String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
+            return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
+        }
+
+        public static void printMemoryUsage(String label) {
+            MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+            float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
+            System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
+                    label,
+                    humanReadableByteCount(memusage.getInit(), false),
+                    humanReadableByteCount(memusage.getUsed(), false),
+                    humanReadableByteCount(memusage.getCommitted(), false),
+                    freeratio * 100
+            );
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/memory/LargePages/TestLargePageSizeInBytes.java	Thu Sep 18 18:19:44 2014 +0200
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestLargePageSizeInBytes
+ * @summary Tests that the flag -XX:LargePageSizeInBytes does not cause warnings on Solaris
+ * @bug 8049536
+ * @library /testlibrary
+ * @run driver TestLargePageSizeInBytes
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class TestLargePageSizeInBytes {
+    private static long M = 1024L * 1024L;
+    private static long G = 1024L * M;
+
+    public static void main(String[] args) throws Exception {
+        if (!Platform.isSolaris()) {
+            // We only use the memcntl syscall on Solaris
+            return;
+        }
+
+        testLargePageSizeInBytes(4 * M);
+        testLargePageSizeInBytes(256 * M);
+        testLargePageSizeInBytes(512 * M);
+        testLargePageSizeInBytes(2 * G);
+    }
+
+    private static void testLargePageSizeInBytes(long size) throws Exception {
+        ProcessBuilder pb =
+            ProcessTools.createJavaProcessBuilder("-XX:+UseLargePages",
+                                                  "-XX:LargePageSizeInBytes=" + size,
+                                                  "-version");
+
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        out.shouldNotContain("Attempt to use MPSS failed.");
+        out.shouldHaveExitValue(0);
+    }
+}