Merge
author jcoomes
Wed, 05 Mar 2008 17:37:04 -0800
changeset 187 970e734a656b
parent 173 bf2517e15f0c (current diff)
parent 186 32e6c95f8d9b (diff)
child 188 ad618a71f914
Merge
hotspot/src/share/vm/memory/allocationStats.cpp
hotspot/src/share/vm/memory/allocationStats.hpp
--- a/hotspot/build/solaris/makefiles/amd64.make	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/build/solaris/makefiles/amd64.make	Wed Mar 05 17:37:04 2008 -0800
@@ -19,7 +19,7 @@
 # Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 # CA 95054 USA or visit www.sun.com if you need additional information or
 # have any questions.
-#  
+#
 #
 
 # Must also specify if CPU is little endian
@@ -45,6 +45,10 @@
 OPT_CFLAGS/generateOptoStub.o = -xO2
 OPT_CFLAGS/thread.o = -xO2
 
+# Workaround for 6624782
+OPT_CFLAGS/instanceKlass.o = -Qoption ube -no_a2lf
+OPT_CFLAGS/objArrayKlass.o = -Qoption ube -no_a2lf
+
 else
 
 ifeq ("${Platform_compiler}", "gcc")
@@ -58,6 +62,6 @@
 # error
 _JUNK2_ := $(shell echo >&2 \
        "*** ERROR: this compiler is not yet supported by this code base!")
-	@exit 1
+       @exit 1
 endif
 endif
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -28,6 +28,12 @@
 int VM_Version::_features = VM_Version::unknown_m;
 const char* VM_Version::_features_str = "";
 
+bool VM_Version::is_niagara1_plus() {
+  // This is a placeholder until the real test is determined.
+  return is_niagara1() &&
+    (os::processor_count() > maximum_niagara1_processor_count());
+}
+
 void VM_Version::initialize() {
   _features = determine_features();
   PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
@@ -160,3 +166,13 @@
 void VM_Version::revert() {
   _features = saved_features;
 }
+
+unsigned int VM_Version::calc_parallel_worker_threads() {
+  unsigned int result;
+  if (is_niagara1_plus()) {
+    result = nof_parallel_worker_threads(5, 16, 8);
+  } else {
+    result = nof_parallel_worker_threads(5, 8, 8);
+  }
+  return result;
+}
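
The helper nof_parallel_worker_threads(num, den, switch_pt) called above is not part of this changeset. As a hedged sketch of the ergonomics the call sites suggest, such a helper plausibly uses every processor up to switch_pt and only num/den of the processors beyond it; on that reading, the (5, 16, 8) call for niagara1-plus grows the worker count more slowly on machines with many hardware threads than the (5, 8, 8) default does.

// Sketch only: the signature and formula are assumptions, not taken from
// this diff. os::processor_count() is used elsewhere in this changeset.
unsigned int nof_parallel_worker_threads(unsigned int num,
                                         unsigned int den,
                                         unsigned int switch_pt) {
  unsigned int ncpus = (unsigned int)os::processor_count();
  return (ncpus <= switch_pt)
      ? ncpus                                           // every CPU up to switch_pt
      : switch_pt + ((ncpus - switch_pt) * num) / den;  // then num/den of the rest
}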
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -64,6 +64,11 @@
 
   static bool is_niagara1(int features) { return (features & niagara1_m) == niagara1_m; }
 
+  static int maximum_niagara1_processor_count() { return 32; }
+  // Returns true if the platform is in the niagara line and
+  // newer than the niagara1.
+  static bool is_niagara1_plus();
+
 public:
   // Initialization
   static void initialize();
@@ -129,4 +134,7 @@
 
   // Override the Abstract_VM_Version implementation.
   static uint page_size_count() { return is_sun4v() ? 4 : 2; }
+
+  // Calculates the number of parallel worker threads.
+  static unsigned int calc_parallel_worker_threads();
 };
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -1071,85 +1071,56 @@
 // for each list in the tree.  Also print some summary
 // information.
 class printTreeCensusClosure : public AscendTreeCensusClosure {
+  int _print_line;
   size_t _totalFree;
-  AllocationStats _totals;
-  size_t _count;
+  FreeList _total;
 
  public:
   printTreeCensusClosure() {
+    _print_line = 0;
     _totalFree = 0;
-    _count = 0;
-    _totals.initialize();
   }
-  AllocationStats* totals() { return &_totals; }
-  size_t count() { return _count; }
-  void increment_count_by(size_t v) { _count += v; }
+  FreeList* total() { return &_total; }
   size_t totalFree() { return _totalFree; }
-  void increment_totalFree_by(size_t v) { _totalFree += v; }
   void do_list(FreeList* fl) {
-    bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head());
-
-    gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t"
-               "%7d\t"      "%7d\t" "%7d\t" "%7d\t"
-               "%7d\t"      "%7d\t" "%7d\t"
-               "%7d\t" "\n",
-               " n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(),
-               fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(),
-               fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(),
-               fl->splitDeaths());
-
-    increment_totalFree_by(fl->count() * fl->size());
-    increment_count_by(fl->count());
-    totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp());
-    totals()->set_surplus(totals()->splitDeaths()     + fl->surplus());
-    totals()->set_prevSweep(totals()->prevSweep()   + fl->prevSweep());
-    totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep());
-    totals()->set_coalBirths(totals()->coalBirths()  + fl->coalBirths());
-    totals()->set_coalDeaths(totals()->coalDeaths()  + fl->coalDeaths());
-    totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths());
-    totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths());
+    if (++_print_line >= 40) {
+      FreeList::print_labels_on(gclog_or_tty, "size");
+      _print_line = 0;
+    }
+    fl->print_on(gclog_or_tty);
+    _totalFree +=            fl->count()            * fl->size()        ;
+    total()->set_count(      total()->count()       + fl->count()      );
+    total()->set_bfrSurp(    total()->bfrSurp()     + fl->bfrSurp()    );
+    total()->set_surplus(    total()->surplus()     + fl->surplus()    );
+    total()->set_desired(    total()->desired()     + fl->desired()    );
+    total()->set_prevSweep(  total()->prevSweep()   + fl->prevSweep()  );
+    total()->set_beforeSweep(total()->beforeSweep() + fl->beforeSweep());
+    total()->set_coalBirths( total()->coalBirths()  + fl->coalBirths() );
+    total()->set_coalDeaths( total()->coalDeaths()  + fl->coalDeaths() );
+    total()->set_splitBirths(total()->splitBirths() + fl->splitBirths());
+    total()->set_splitDeaths(total()->splitDeaths() + fl->splitDeaths());
   }
 };
 
 void BinaryTreeDictionary::printDictCensus(void) const {
 
   gclog_or_tty->print("\nBinaryTree\n");
-  gclog_or_tty->print(
-             "%4s\t\t" "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
-             "%7s\t"   "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"     "\n",
-             "size",  "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
-             "count", "cBirths", "cDeaths", "sBirths", "sDeaths");
-
+  FreeList::print_labels_on(gclog_or_tty, "size");
   printTreeCensusClosure ptc;
   ptc.do_tree(root());
 
-  gclog_or_tty->print(
-             "\t\t"    "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
-             "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"     "\n",
-                       "bfrsurp", "surplus", "prvSwep", "bfrSwep",
-             "count",  "cBirths", "cDeaths", "sBirths", "sDeaths");
+  FreeList* total = ptc.total();
+  FreeList::print_labels_on(gclog_or_tty, " ");
+  total->print_on(gclog_or_tty, "TOTAL\t");
   gclog_or_tty->print(
-             "%s\t\t"  "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"
-             "%7d\t"   "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"    "\n",
-             "totl",
-             ptc.totals()->bfrSurp(),
-             ptc.totals()->surplus(),
-             ptc.totals()->prevSweep(),
-             ptc.totals()->beforeSweep(),
-             ptc.count(),
-             ptc.totals()->coalBirths(),
-             ptc.totals()->coalDeaths(),
-             ptc.totals()->splitBirths(),
-             ptc.totals()->splitDeaths());
-  gclog_or_tty->print("totalFree(words): %7d growth: %8.5f  deficit: %8.5f\n",
+              "totalFree(words): " SIZE_FORMAT_W(16)
+              " growth: %8.5f  deficit: %8.5f\n",
               ptc.totalFree(),
-              (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths()
-                       -ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths())
-              /(ptc.totals()->prevSweep() != 0 ?
-                (double)ptc.totals()->prevSweep() : 1.0),
-             (double)(ptc.totals()->desired() - ptc.count())
-             /(ptc.totals()->desired() != 0 ?
-               (double)ptc.totals()->desired() : 1.0));
+              (double)(total->splitBirths() + total->coalBirths()
+                     - total->splitDeaths() - total->coalDeaths())
+              /(total->prevSweep() != 0 ? (double)total->prevSweep() : 1.0),
+             (double)(total->desired() - total->count())
+             /(total->desired() != 0 ? (double)total->desired() : 1.0));
 }
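
The growth and deficit ratios printed at the end of printDictCensus are unchanged in substance by this rewrite; only their inputs now come from the accumulated FreeList totals. A worked example with made-up census numbers:

// Hypothetical census totals, mirroring the FreeList getters used above.
ssize_t splitBirths = 120, coalBirths = 40, splitDeaths = 30, coalDeaths = 10;
ssize_t prevSweep   = 100, desired    = 500, count      = 450;

double growth  = (double)(splitBirths + coalBirths - splitDeaths - coalDeaths)
               / (prevSweep != 0 ? (double)prevSweep : 1.0);  // 120/100 = 1.2
double deficit = (double)(desired - count)
               / (desired != 0 ? (double)desired : 1.0);      //  50/500 = 0.1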
 
 // Verify the following tree invariants:
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -1835,7 +1835,7 @@
   guarantee(false, "NYI");
 }
 
-bool CompactibleFreeListSpace::linearAllocationWouldFail() {
+bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
   return _smallLinearAllocBlock._word_size == 0;
 }
 
@@ -1906,6 +1906,13 @@
   }
 }
 
+// Support for concurrent collection policy decisions.
+bool CompactibleFreeListSpace::should_concurrent_collect() const {
+  // In the future we might want to add in fragmentation stats --
+  // including erosion of the "mountain" into this decision as well.
+  return !adaptive_freelists() && linearAllocationWouldFail();
+}
+
 // Support for compaction
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
@@ -2013,11 +2020,11 @@
   }
 }
 
-void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
+void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
   setFLSurplus();
   setFLHints();
   if (PrintGC && PrintFLSCensus > 0) {
-    printFLCensus(sweepCt);
+    printFLCensus(sweep_count);
   }
   clearFLCensus();
   assert_locked();
@@ -2293,59 +2300,37 @@
 }
 #endif
 
-void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
+void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   assert_lock_strong(&_freelistLock);
-  ssize_t bfrSurp     = 0;
-  ssize_t surplus     = 0;
-  ssize_t desired     = 0;
-  ssize_t prevSweep   = 0;
-  ssize_t beforeSweep = 0;
-  ssize_t count       = 0;
-  ssize_t coalBirths  = 0;
-  ssize_t coalDeaths  = 0;
-  ssize_t splitBirths = 0;
-  ssize_t splitDeaths = 0;
-  gclog_or_tty->print("end sweep# %d\n", sweepCt);
-  gclog_or_tty->print("%4s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
-             "%7s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
-             "%7s\t"    "\n",
-             "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",
-             "bfrSwep", "count",     "cBirths",   "cDeaths",   "sBirths",
-             "sDeaths");
-
+  FreeList total;
+  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
+  FreeList::print_labels_on(gclog_or_tty, "size");
   size_t totalFree = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     const FreeList *fl = &_indexedFreeList[i];
-        totalFree += fl->count() * fl->size();
-
-    gclog_or_tty->print("%4d\t"          "%7d\t"             "%7d\t"        "%7d\t"
-               "%7d\t"          "%7d\t"             "%7d\t"        "%7d\t"
-               "%7d\t"          "%7d\t"             "%7d\t"        "\n",
-               fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(),
-               fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(),
-               fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
-    bfrSurp     += fl->bfrSurp();
-    surplus     += fl->surplus();
-    desired     += fl->desired();
-    prevSweep   += fl->prevSweep();
-    beforeSweep += fl->beforeSweep();
-    count       += fl->count();
-    coalBirths  += fl->coalBirths();
-    coalDeaths  += fl->coalDeaths();
-    splitBirths += fl->splitBirths();
-    splitDeaths += fl->splitDeaths();
+    totalFree += fl->count() * fl->size();
+    if (i % (40*IndexSetStride) == 0) {
+      FreeList::print_labels_on(gclog_or_tty, "size");
+    }
+    fl->print_on(gclog_or_tty);
+    total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
+    total.set_surplus(    total.surplus()     + fl->surplus()    );
+    total.set_desired(    total.desired()     + fl->desired()    );
+    total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
+    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
+    total.set_count(      total.count()       + fl->count()      );
+    total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
+    total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
+    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
+    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
   }
-  gclog_or_tty->print("%4s\t"
-            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t"
-            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t" "\n",
-            "totl",
-            bfrSurp,     surplus,     desired,     prevSweep,     beforeSweep,
-            count,       coalBirths,  coalDeaths,  splitBirths,   splitDeaths);
-  gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
+  total.print_on(gclog_or_tty, "TOTAL");
+  gclog_or_tty->print_cr("Total free in indexed lists "
+                         SIZE_FORMAT " words", totalFree);
   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
-    (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
-            (prevSweep != 0 ? (double)prevSweep : 1.0),
-    (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
+    (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
+            (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
+    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
   _dictionary->printDictCensus();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -418,7 +418,7 @@
   // chunk exists, return NULL.
   FreeChunk* find_chunk_at_end();
 
-  bool adaptive_freelists() { return _adaptive_freelists; }
+  bool adaptive_freelists() const { return _adaptive_freelists; }
 
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
@@ -566,7 +566,7 @@
   FreeChunk* allocateScratch(size_t size);
 
   // returns true if either the small or large linear allocation buffer is empty.
-  bool       linearAllocationWouldFail();
+  bool       linearAllocationWouldFail() const;
 
   // Adjust the chunk for the minimum size.  This version is called in
   // most cases in CompactibleFreeListSpace methods.
@@ -585,6 +585,9 @@
   void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
               bool coalesced);
 
+  // Support for decisions regarding concurrent collection policy
+  bool should_concurrent_collect() const;
+
   // Support for compaction
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();
@@ -622,7 +625,7 @@
   // coalescing of chunks during the sweep of garbage.
 
   // Print the statistics for the free lists.
-  void printFLCensus(int sweepCt)         const;
+  void printFLCensus(size_t sweep_count) const;
 
   // Statistics functions
   // Initialize census for lists before the sweep.
@@ -635,12 +638,11 @@
   // Clear the census for each of the free lists.
   void clearFLCensus();
   // Perform functions for the census after the end of the sweep.
-  void endSweepFLCensus(int sweepCt);
+  void endSweepFLCensus(size_t sweep_count);
   // Return true if the count of free chunks is greater
   // than the desired number of free chunks.
   bool coalOverPopulated(size_t size);
 
-
 // Record (for each size):
 //
 //   split-births = #chunks added due to splits in (prev-sweep-end,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -3121,12 +3121,7 @@
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
-  if (parallel) {
-    return cmsSpace()->par_allocate(adj_word_sz);
-  } else {
-    return cmsSpace()->allocate(adj_word_sz);
-  }
+  return have_lock_and_allocate(word_size, tlab);
 }
 
 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
@@ -5732,13 +5727,19 @@
   // in the perm_gen_verify_bit_map. In order to do that we traverse
   // all blocks in perm gen and mark all dead objects.
   if (verifying() && !cms_should_unload_classes()) {
-    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
-                             bitMapLock());
     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
            "Should have already been allocated");
     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
                                markBitMap(), perm_gen_verify_bit_map());
-    _permGen->cmsSpace()->blk_iterate(&mdo);
+    if (asynch) {
+      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
+                               bitMapLock());
+      _permGen->cmsSpace()->blk_iterate(&mdo);
+    } else {
+      // In the case of synchronous sweep, we already have
+      // the requisite locks/tokens.
+      _permGen->cmsSpace()->blk_iterate(&mdo);
+    }
   }
 
   if (asynch) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -302,3 +302,29 @@
 #endif
 }
 #endif
+
+// Print the "label line" for free list stats.
+void FreeList::print_labels_on(outputStream* st, const char* c) {
+  st->print("%16s\t", c);
+  st->print("%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"
+            "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "\n",
+            "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
+            "count",   "cBirths", "cDeaths", "sBirths", "sDeaths");
+}
+
+// Print the AllocationStats for the given free list. If the second argument
+// to the call is a non-null string, it is printed in the first column;
+// otherwise, if the argument is null (the default), then the size of the
+// (free list) block is printed in the first column.
+void FreeList::print_on(outputStream* st, const char* c) const {
+  if (c != NULL) {
+    st->print("%16s", c);
+  } else {
+    st->print(SIZE_FORMAT_W(16), size());
+  }
+  st->print("\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
+           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
+           bfrSurp(),             surplus(),             desired(),             prevSweep(),           beforeSweep(),
+           count(),               coalBirths(),          coalDeaths(),          splitBirths(),         splitDeaths());
+}
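
Given the format strings above, the header emitted by print_labels_on(st, "size") lines up with the per-list rows from print_on; compressed, the column order is:

// size  bfrsurp  surplus  desired  prvSwep  bfrSwep  count  cBirths  cDeaths  sBirths  sDeaths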
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -38,6 +38,7 @@
 
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
+  friend class printTreeCensusClosure;
   FreeChunk*    _head;          // List of free chunks
   FreeChunk*    _tail;          // Tail of list of free chunks
  size_t        _size;          // Size in Heap words of each chunk
@@ -63,10 +64,11 @@
  protected:
   void init_statistics();
   void set_count(ssize_t v) { _count = v;}
-  void increment_count() { _count++; }
+  void increment_count()    { _count++; }
   void decrement_count() {
     _count--;
-    assert(_count >= 0, "Count should not be negative"); }
+    assert(_count >= 0, "Count should not be negative");
+  }
 
  public:
   // Constructor
@@ -159,6 +161,10 @@
   ssize_t desired() const {
     return _allocation_stats.desired();
   }
+  void set_desired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_desired(v);
+  }
   void compute_desired(float inter_sweep_current,
                        float inter_sweep_estimate) {
     assert_proper_lock_protection();
@@ -298,4 +304,8 @@
  // Verify that the chunk "fc" is in the free lists.
  // Returns false if it is not found.
   bool verifyChunkInFreeLists(FreeChunk* fc) const;
+
+  // Printing support
+  static void print_labels_on(outputStream* st, const char* c);
+  void print_on(outputStream* st, const char* c = NULL) const;
 };
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Wed Mar 05 17:37:04 2008 -0800
@@ -19,15 +19,22 @@
 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 // CA 95054 USA or visit www.sun.com if you need additional information or
 // have any questions.
-//  
+//
 //
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
 
-gcAdaptivePolicyCounters.hpp		adaptiveSizePolicy.hpp
-gcAdaptivePolicyCounters.hpp		gcPolicyCounters.hpp
+allocationStats.cpp                     allocationStats.hpp
+allocationStats.cpp                     ostream.hpp
 
-gcAdaptivePolicyCounters.cpp		resourceArea.hpp
+allocationStats.hpp                     allocation.hpp
+allocationStats.hpp                     gcUtil.hpp
+allocationStats.hpp                     globalDefinitions.hpp
+
+gcAdaptivePolicyCounters.hpp            adaptiveSizePolicy.hpp
+gcAdaptivePolicyCounters.hpp            gcPolicyCounters.hpp
+
+gcAdaptivePolicyCounters.cpp            resourceArea.hpp
 gcAdaptivePolicyCounters.cpp            gcAdaptivePolicyCounters.hpp
 
 gSpaceCounters.cpp                      generation.hpp
@@ -44,7 +51,7 @@
 
 isGCActiveMark.hpp                      parallelScavengeHeap.hpp
 
-markSweep.inline.hpp			psParallelCompact.hpp
+markSweep.inline.hpp                    psParallelCompact.hpp
 
 mutableNUMASpace.cpp                    mutableNUMASpace.hpp
 mutableNUMASpace.cpp                    sharedHeap.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -74,8 +74,8 @@
 #ifdef SHRINKS_AT_END_OF_EDEN
   size_t delta_in_survivor = 0;
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t space_alignment = heap->intra_generation_alignment();
-  const size_t gen_alignment = heap->generation_alignment();
+  const size_t space_alignment = heap->intra_heap_alignment();
+  const size_t gen_alignment = heap->object_heap_alignment();
 
   MutableSpace* space_shrinking = NULL;
   if (from_space()->end() > to_space()->end()) {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -785,6 +785,9 @@
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
     gch->set_incremental_collection_will_fail();
+
+    // Reset the PromotionFailureALot counters.
+    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -86,7 +86,7 @@
   if (eden_space()->is_empty()) {
     // Respect the minimum size for eden and for the young gen as a whole.
     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    const size_t eden_alignment = heap->intra_generation_alignment();
+    const size_t eden_alignment = heap->intra_heap_alignment();
     const size_t gen_alignment = heap->young_gen_alignment();
 
     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
@@ -124,7 +124,7 @@
 // to_space can be.
 size_t ASPSYoungGen::available_to_live() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t alignment = heap->intra_generation_alignment();
+  const size_t alignment = heap->intra_heap_alignment();
 
   // Include any space that is committed but is not in eden.
   size_t available = pointer_delta(eden_space()->bottom(),
@@ -275,7 +275,7 @@
   assert(eden_start < from_start, "Cannot push into from_space");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t alignment = heap->intra_generation_alignment();
+  const size_t alignment = heap->intra_heap_alignment();
 
   // Check whether from space is below to space
   if (from_start < to_start) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -39,10 +39,10 @@
 
    // The number of worker threads should already have been set,
    // explicitly or ergonomically, during flag initialization.
-    if (ParallelGCThreads == 0) {
-      assert(UseParallelGC, "Setting ParallelGCThreads without UseParallelGC");
-      ParallelGCThreads = os::active_processor_count();
-    }
+    assert(UseSerialGC ||
+           !FLAG_IS_DEFAULT(ParallelGCThreads) ||
+           (ParallelGCThreads > 0),
+           "ParallelGCThreads should be set before flag initialization");
 
    // The survivor ratios are calculated "raw", unlike the
     // default gc, which adds 2 to the ratio value. We need to
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -173,7 +173,7 @@
     new PSAdaptiveSizePolicy(eden_capacity,
                              initial_promo_size,
                              young_gen()->to_space()->capacity_in_bytes(),
-                             intra_generation_alignment(),
+                             intra_heap_alignment(),
                              max_gc_pause_sec,
                              max_gc_minor_pause_sec,
                              GCTimeRatio
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -58,9 +58,9 @@
 
  public:
   ParallelScavengeHeap() : CollectedHeap() {
-    set_alignment(_perm_gen_alignment, intra_generation_alignment());
-    set_alignment(_young_gen_alignment, intra_generation_alignment());
-    set_alignment(_old_gen_alignment, intra_generation_alignment());
+    set_alignment(_perm_gen_alignment, intra_heap_alignment());
+    set_alignment(_young_gen_alignment, intra_heap_alignment());
+    set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
   // For use by VM operations
@@ -92,14 +92,14 @@
 
   void post_initialize();
   void update_counters();
-
   // The alignment used for the various generations.
   size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
   size_t young_gen_alignment() const { return _young_gen_alignment; }
   size_t old_gen_alignment()  const { return _old_gen_alignment; }
 
-  // The alignment used for eden and survivors within the young gen.
-  size_t intra_generation_alignment() const { return 64 * K; }
+  // The alignment used for eden and survivors within the young gen
+  // and for boundary between young gen and old gen.
+  size_t intra_heap_alignment() const { return 64 * K; }
 
   size_t capacity() const;
   size_t used() const;
@@ -217,6 +217,6 @@
 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
 {
   assert(is_power_of_2((intptr_t)val), "must be a power of 2");
-  var = round_to(val, intra_generation_alignment());
+  var = round_to(val, intra_heap_alignment());
   return var;
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -88,7 +88,7 @@
 
   // Compute maximum space sizes for performance counters
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  size_t alignment = heap->intra_generation_alignment();
+  size_t alignment = heap->intra_heap_alignment();
   size_t size = _virtual_space->reserved_size();
 
   size_t max_survivor_size;
@@ -141,7 +141,7 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // Compute sizes
-  size_t alignment = heap->intra_generation_alignment();
+  size_t alignment = heap->intra_heap_alignment();
   size_t size = _virtual_space->committed_size();
 
   size_t survivor_size = size / InitialSurvivorRatio;
@@ -192,7 +192,7 @@
 #ifndef PRODUCT
 void PSYoungGen::space_invariants() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t alignment = heap->intra_generation_alignment();
+  const size_t alignment = heap->intra_heap_alignment();
 
   // Currently, our eden size cannot shrink to zero
   guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
@@ -392,7 +392,7 @@
   char* to_end     = (char*)to_space()->end();
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t alignment = heap->intra_generation_alignment();
+  const size_t alignment = heap->intra_heap_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
 
@@ -708,7 +708,7 @@
 size_t PSYoungGen::available_to_live() {
   size_t delta_in_survivor = 0;
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t space_alignment = heap->intra_generation_alignment();
+  const size_t space_alignment = heap->intra_heap_alignment();
   const size_t gen_alignment = heap->young_gen_alignment();
 
   MutableSpace* space_shrinking = NULL;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_allocationStats.cpp.incl"
+
+// Technically this should be derived from machine speed, and
+// ideally it would be dynamically adjusted.
+float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class AllocationStats VALUE_OBJ_CLASS_SPEC {
+  // A duration threshold (in ms) used to filter
+  // possibly unreliable samples.
+  static float _threshold;
+
+  // We measure the demand between the end of the previous sweep and
+  // beginning of this sweep:
+  //   Count(end_last_sweep) - Count(start_this_sweep)
+  //     + splitBirths(between) - splitDeaths(between)
+  // The above number divided by the time since the start [END???] of the
+  // previous sweep gives us a time rate of demand for blocks
+  // of this size. We compute a padded average of this rate as
+  // our current estimate for the time rate of demand for blocks
+  // of this size. Similarly, we keep a padded average for the time
+  // between sweeps. Our current estimate for demand for blocks of
+  // this size is then simply computed as the product of these two
+  // estimates.
+  AdaptivePaddedAverage _demand_rate_estimate;
+
+  ssize_t     _desired;          // Estimate computed as described above
+  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
+
+  ssize_t     _surplus;         // count - (desired +/- small-percent),
+                                // used to tune splitting in best fit
+  ssize_t     _bfrSurp;         // surplus at start of current sweep
+  ssize_t     _prevSweep;       // count from end of previous sweep
+  ssize_t     _beforeSweep;     // count from before current sweep
+  ssize_t     _coalBirths;      // additional chunks from coalescing
+  ssize_t     _coalDeaths;      // loss from coalescing
+  ssize_t     _splitBirths;     // additional chunks from splitting
+  ssize_t     _splitDeaths;     // loss from splitting
+  size_t     _returnedBytes;    // number of bytes returned to list.
+ public:
+  void initialize() {
+    AdaptivePaddedAverage* dummy =
+      new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
+                                                         CMS_FLSPadding);
+    _desired = 0;
+    _coalDesired = 0;
+    _surplus = 0;
+    _bfrSurp = 0;
+    _prevSweep = 0;
+    _beforeSweep = 0;
+    _coalBirths = 0;
+    _coalDeaths = 0;
+    _splitBirths = 0;
+    _splitDeaths = 0;
+    _returnedBytes = 0;
+  }
+
+  AllocationStats() {
+    initialize();
+  }
+  // The rate estimate is in blocks per second.
+  void compute_desired(size_t count,
+                       float inter_sweep_current,
+                       float inter_sweep_estimate) {
+    // If the latest inter-sweep time is below our granularity
+    // of measurement, we may call in here with
+    // inter_sweep_current == 0. However, even for suitably small
+    // but non-zero inter-sweep durations, we may not trust the accuracy
+    // of accumulated data, since it has not been "integrated"
+    // (read "low-pass-filtered") long enough, and would be
+    // vulnerable to noisy glitches. In such cases, we
+    // ignore the current sample and use currently available
+    // historical estimates.
+    if (inter_sweep_current > _threshold) {
+      ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
+      float rate = ((float)demand)/inter_sweep_current;
+      _demand_rate_estimate.sample(rate);
+      _desired = (ssize_t)(_demand_rate_estimate.padded_average()
+                           *inter_sweep_estimate);
+    }
+  }
+
+  ssize_t desired() const { return _desired; }
+  void set_desired(ssize_t v) { _desired = v; }
+
+  ssize_t coalDesired() const { return _coalDesired; }
+  void set_coalDesired(ssize_t v) { _coalDesired = v; }
+
+  ssize_t surplus() const { return _surplus; }
+  void set_surplus(ssize_t v) { _surplus = v; }
+  void increment_surplus() { _surplus++; }
+  void decrement_surplus() { _surplus--; }
+
+  ssize_t bfrSurp() const { return _bfrSurp; }
+  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
+  ssize_t prevSweep() const { return _prevSweep; }
+  void set_prevSweep(ssize_t v) { _prevSweep = v; }
+  ssize_t beforeSweep() const { return _beforeSweep; }
+  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
+
+  ssize_t coalBirths() const { return _coalBirths; }
+  void set_coalBirths(ssize_t v) { _coalBirths = v; }
+  void increment_coalBirths() { _coalBirths++; }
+
+  ssize_t coalDeaths() const { return _coalDeaths; }
+  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
+  void increment_coalDeaths() { _coalDeaths++; }
+
+  ssize_t splitBirths() const { return _splitBirths; }
+  void set_splitBirths(ssize_t v) { _splitBirths = v; }
+  void increment_splitBirths() { _splitBirths++; }
+
+  ssize_t splitDeaths() const { return _splitDeaths; }
+  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
+  void increment_splitDeaths() { _splitDeaths++; }
+
+  NOT_PRODUCT(
+    size_t returnedBytes() const { return _returnedBytes; }
+    void set_returnedBytes(size_t v) { _returnedBytes = v; }
+  )
+};
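
A hedged walk-through of compute_desired() above, with hypothetical numbers (none of these values appear in the changeset):

ssize_t prevSweep = 400;   // blocks of this size at the end of the last sweep
ssize_t count     = 300;   // blocks on the list at the start of this sweep
ssize_t splitBirths = 50, splitDeaths = 30;        // activity in between
float inter_sweep_current  = 2.0f;                 // seconds since last sweep
float inter_sweep_estimate = 3.0f;                 // padded inter-sweep estimate

ssize_t demand = prevSweep - count + splitBirths - splitDeaths;  // 120 blocks
float rate = ((float)demand) / inter_sweep_current;              // 60 blocks/s
// The sample feeds _demand_rate_estimate; if the padded average settles
// near the raw rate, the desired count for the next inter-sweep period is
ssize_t desired = (ssize_t)(rate * inter_sweep_estimate);        // 180 blocks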
--- a/hotspot/src/share/vm/includeDB_core	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_core	Wed Mar 05 17:37:04 2008 -0800
@@ -19,7 +19,7 @@
 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 // CA 95054 USA or visit www.sun.com if you need additional information or
 // have any questions.
-//  
+//
 //
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@@ -46,13 +46,13 @@
 // as dependencies.  Header files named H.inline.hpp generally contain
 // bodies for inline functions declared in H.hpp.
 //
-// NOTE: Files that use the token "generate_platform_dependent_include" 
+// NOTE: Files that use the token "generate_platform_dependent_include"
 // are expected to contain macro references like <os>, <arch_model>, ... and
 // makedeps has a dependency on these platform files looking like:
-// foo_<macro>.trailing_string 
+// foo_<macro>.trailing_string
 // (where "trailing_string" can be any legal filename strings but typically
 // is "hpp" or "inline.hpp").
-// 
+//
 // The dependency in makedeps (and enforced) is that an underscore
 // will precede the macro invocation. Note that this restriction
 // is only enforced on filenames that have the dependency token
@@ -148,12 +148,6 @@
 
 allocation.inline.hpp                   os.hpp
 
-allocationStats.cpp                     allocationStats.hpp
-
-allocationStats.hpp                     allocation.hpp
-allocationStats.hpp                     gcUtil.hpp
-allocationStats.hpp                     globalDefinitions.hpp
-
 aprofiler.cpp                           aprofiler.hpp
 aprofiler.cpp                           collectedHeap.inline.hpp
 aprofiler.cpp                           oop.inline.hpp
@@ -1935,7 +1929,7 @@
 
 init.cpp                                bytecodes.hpp
 init.cpp                                collectedHeap.hpp
-init.cpp				handles.inline.hpp
+init.cpp                                handles.inline.hpp
 init.cpp                                icBuffer.hpp
 init.cpp                                icache.hpp
 init.cpp                                init.hpp
--- a/hotspot/src/share/vm/memory/allocationStats.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_allocationStats.cpp.incl"
-
-// Technically this should be derived from machine speed, and
-// ideally it would be dynamically adjusted.
-float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
--- a/hotspot/src/share/vm/memory/allocationStats.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,136 +0,0 @@
-/*
- * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-class AllocationStats VALUE_OBJ_CLASS_SPEC {
-  // A duration threshold (in ms) used to filter
-  // possibly unreliable samples.
-  static float _threshold;
-
-  // We measure the demand between the end of the previous sweep and
-  // beginning of this sweep:
-  //   Count(end_last_sweep) - Count(start_this_sweep)
-  //     + splitBirths(between) - splitDeaths(between)
-  // The above number divided by the time since the start [END???] of the
-  // previous sweep gives us a time rate of demand for blocks
-  // of this size. We compute a padded average of this rate as
-  // our current estimate for the time rate of demand for blocks
-  // of this size. Similarly, we keep a padded average for the time
-  // between sweeps. Our current estimate for demand for blocks of
-  // this size is then simply computed as the product of these two
-  // estimates.
-  AdaptivePaddedAverage _demand_rate_estimate;
-
-  ssize_t     _desired;          // Estimate computed as described above
-  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing
-
-  ssize_t     _surplus;         // count - (desired +/- small-percent),
-                                // used to tune splitting in best fit
-  ssize_t     _bfrSurp;         // surplus at start of current sweep
-  ssize_t     _prevSweep;       // count from end of previous sweep
-  ssize_t     _beforeSweep;     // count from before current sweep
-  ssize_t     _coalBirths;      // additional chunks from coalescing
-  ssize_t     _coalDeaths;      // loss from coalescing
-  ssize_t     _splitBirths;     // additional chunks from splitting
-  ssize_t     _splitDeaths;     // loss from splitting
-  size_t     _returnedBytes;    // number of bytes returned to list.
- public:
-  void initialize() {
-    AdaptivePaddedAverage* dummy =
-      new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
-                                                         CMS_FLSPadding);
-    _desired = 0;
-    _coalDesired = 0;
-    _surplus = 0;
-    _bfrSurp = 0;
-    _prevSweep = 0;
-    _beforeSweep = 0;
-    _coalBirths = 0;
-    _coalDeaths = 0;
-    _splitBirths = 0;
-    _splitDeaths = 0;
-    _returnedBytes = 0;
-  }
-
-  AllocationStats() {
-    initialize();
-  }
-  // The rate estimate is in blocks per second.
-  void compute_desired(size_t count,
-                       float inter_sweep_current,
-                       float inter_sweep_estimate) {
-    // If the latest inter-sweep time is below our granularity
-    // of measurement, we may call in here with
-    // inter_sweep_current == 0. However, even for suitably small
-    // but non-zero inter-sweep durations, we may not trust the accuracy
-    // of accumulated data, since it has not been "integrated"
-    // (read "low-pass-filtered") long enough, and would be
-    // vulnerable to noisy glitches. In such cases, we
-    // ignore the current sample and use currently available
-    // historical estimates.
-    if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
-      float rate = ((float)demand)/inter_sweep_current;
-      _demand_rate_estimate.sample(rate);
-      _desired = (ssize_t)(_demand_rate_estimate.padded_average()
-                           *inter_sweep_estimate);
-    }
-  }
-
-  ssize_t desired() const { return _desired; }
-  ssize_t coalDesired() const { return _coalDesired; }
-  void set_coalDesired(ssize_t v) { _coalDesired = v; }
-
-  ssize_t surplus() const { return _surplus; }
-  void set_surplus(ssize_t v) { _surplus = v; }
-  void increment_surplus() { _surplus++; }
-  void decrement_surplus() { _surplus--; }
-
-  ssize_t bfrSurp() const { return _bfrSurp; }
-  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
-  ssize_t prevSweep() const { return _prevSweep; }
-  void set_prevSweep(ssize_t v) { _prevSweep = v; }
-  ssize_t beforeSweep() const { return _beforeSweep; }
-  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
-
-  ssize_t coalBirths() const { return _coalBirths; }
-  void set_coalBirths(ssize_t v) { _coalBirths = v; }
-  void increment_coalBirths() { _coalBirths++; }
-
-  ssize_t coalDeaths() const { return _coalDeaths; }
-  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
-  void increment_coalDeaths() { _coalDeaths++; }
-
-  ssize_t splitBirths() const { return _splitBirths; }
-  void set_splitBirths(ssize_t v) { _splitBirths = v; }
-  void increment_splitBirths() { _splitBirths++; }
-
-  ssize_t splitDeaths() const { return _splitDeaths; }
-  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
-  void increment_splitDeaths() { _splitDeaths++; }
-
-  NOT_PRODUCT(
-    size_t returnedBytes() const { return _returnedBytes; }
-    void set_returnedBytes(size_t v) { _returnedBytes = v; }
-  )
-};
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -196,8 +196,8 @@
   assert(_whole_heap.contains(new_region),
            "attempt to cover area not in reserved area");
   debug_only(verify_guard();)
-  int ind = find_covering_region_by_base(new_region.start());
-  MemRegion old_region = _covered[ind];
+  int const ind = find_covering_region_by_base(new_region.start());
+  MemRegion const old_region = _covered[ind];
   assert(old_region.start() == new_region.start(), "just checking");
   if (new_region.word_size() != old_region.word_size()) {
     // Commit new or uncommit old pages, if necessary.
@@ -205,21 +205,21 @@
     // Extend the end of this _committed region
     // to cover the end of any lower _committed regions.
     // This forms overlapping regions, but never interior regions.
-    HeapWord* max_prev_end = largest_prev_committed_end(ind);
+    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
     if (max_prev_end > cur_committed.end()) {
       cur_committed.set_end(max_prev_end);
     }
     // Align the end up to a page size (starts are already aligned).
-    jbyte* new_end = byte_after(new_region.last());
-    HeapWord* new_end_aligned =
-      (HeapWord*)align_size_up((uintptr_t)new_end, _page_size);
+    jbyte* const new_end = byte_after(new_region.last());
+    HeapWord* const new_end_aligned =
+      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
     assert(new_end_aligned >= (HeapWord*) new_end,
            "align up, but less");
     // The guard page is always committed and should not be committed over.
-    HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
+    HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
     if (new_end_for_commit > cur_committed.end()) {
       // Must commit new pages.
-      MemRegion new_committed =
+      MemRegion const new_committed =
         MemRegion(cur_committed.end(), new_end_for_commit);
 
       assert(!new_committed.is_empty(), "Region should not be empty here");
@@ -233,7 +233,7 @@
     // the cur_committed region may include the guard region.
     } else if (new_end_aligned < cur_committed.end()) {
       // Must uncommit pages.
-      MemRegion uncommit_region =
+      MemRegion const uncommit_region =
         committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                 cur_committed.end()));
       if (!uncommit_region.is_empty()) {
@@ -257,7 +257,7 @@
     }
     assert(index_for(new_region.last()) < (int) _guard_index,
       "The guard card will be overwritten");
-    jbyte* end = byte_after(new_region.last());
+    jbyte* const end = byte_after(new_region.last());
     // do nothing if we resized downward.
     if (entry < end) {
       memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -556,10 +556,16 @@
 }
 
 
-void CardTableRS::verify_empty(MemRegion mr) {
+void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
   if (!mr.is_empty()) {
     jbyte* cur_entry = byte_for(mr.start());
     jbyte* limit = byte_after(mr.last());
+    // The region mr may not start on a card boundary so
+    // the first card may reflect a write to the space
+    // just prior to mr.
+    if (!is_aligned(mr.start())) {
+      cur_entry++;
+    }
     for (;cur_entry < limit; cur_entry++) {
       guarantee(*cur_entry == CardTableModRefBS::clean_card,
                 "Unexpected dirty card found");
--- a/hotspot/src/share/vm/memory/cardTableRS.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableRS.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -126,7 +126,7 @@
   }
 
   void verify();
-  void verify_empty(MemRegion mr);
+  void verify_aligned_region_empty(MemRegion mr);
 
   void clear(MemRegion mr) { _ct_bs.clear(mr); }
   void clear_into_younger(Generation* gen, bool clear_perm);
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -57,45 +57,51 @@
   // User inputs from -mx and -ms are aligned
   _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
                                           min_alignment());
-  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(),
-                                          min_alignment());
-  _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment());
+  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(),
+                                          min_alignment()));
+  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
 
   // Check validity of heap parameters from launcher
-  if (_initial_heap_byte_size == 0) {
-    _initial_heap_byte_size = NewSize + OldSize;
+  if (initial_heap_byte_size() == 0) {
+    set_initial_heap_byte_size(NewSize + OldSize);
   } else {
-    Universe::check_alignment(_initial_heap_byte_size, min_alignment(),
+    Universe::check_alignment(initial_heap_byte_size(), min_alignment(),
                             "initial heap");
   }
-  if (_min_heap_byte_size == 0) {
-    _min_heap_byte_size = NewSize + OldSize;
+  if (min_heap_byte_size() == 0) {
+    set_min_heap_byte_size(NewSize + OldSize);
   } else {
-    Universe::check_alignment(_min_heap_byte_size, min_alignment(),
+    Universe::check_alignment(min_heap_byte_size(), min_alignment(),
                             "initial heap");
   }
 
   // Check heap parameter properties
-  if (_initial_heap_byte_size < M) {
+  if (initial_heap_byte_size() < M) {
     vm_exit_during_initialization("Too small initial heap");
   }
   // Check heap parameter properties
-  if (_min_heap_byte_size < M) {
+  if (min_heap_byte_size() < M) {
     vm_exit_during_initialization("Too small minimum heap");
   }
-  if (_initial_heap_byte_size <= NewSize) {
+  if (initial_heap_byte_size() <= NewSize) {
      // make sure there is at least some room in old space
     vm_exit_during_initialization("Too small initial heap for new size specified");
   }
-  if (_max_heap_byte_size < _min_heap_byte_size) {
+  if (max_heap_byte_size() < min_heap_byte_size()) {
     vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
   }
-  if (_initial_heap_byte_size < _min_heap_byte_size) {
+  if (initial_heap_byte_size() < min_heap_byte_size()) {
     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   }
-  if (_max_heap_byte_size < _initial_heap_byte_size) {
+  if (max_heap_byte_size() < initial_heap_byte_size()) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
+
+  if (PrintGCDetails && Verbose) {
+    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
+      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
+      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
+  }
 }
 
 void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
@@ -128,10 +134,26 @@
 
 // GenCollectorPolicy methods.
 
+size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
+  size_t x = base_size / (NewRatio+1);
+  size_t new_gen_size = x > min_alignment() ?
+                     align_size_down(x, min_alignment()) :
+                     min_alignment();
+  return new_gen_size;
+}
+
+size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
+                                                 size_t maximum_size) {
+  size_t alignment = min_alignment();
+  size_t max_minus = maximum_size - alignment;
+  return desired_size < max_minus ? desired_size : max_minus;
+}
+
+
 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                 size_t init_promo_size,
                                                 size_t init_survivor_size) {
-  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
+  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
   _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                         init_promo_size,
                                         init_survivor_size,
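
A hedged usage sketch of the two new helpers, with hypothetical sizes (a 96 MB maximum heap, NewRatio = 2, and a 64 KB min_alignment; none of these values are fixed by this changeset):

// scale_by_NewRatio_aligned(96*M)   -> 96M / (NewRatio + 1) = 32M, then
//                                      aligned down (already aligned here)
// bound_minus_alignment(40*M, 32*M) -> desired (40M) is not below
//                                      32M - 64K, so 32M - 64K is returned

The second helper thus keeps any desired generation size at least one alignment unit below the bound it is checked against.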
@@ -210,74 +232,260 @@
   assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
 }
 
+// Values set on the command line win over any ergonomically
+// set command line parameters.
+// Ergonomic choices of parameters are made before this
+// method is called.  Values for command line parameters such as NewSize
+// and MaxNewSize feed those ergonomic choices into this method.
+// This method makes the final generation sizings consistent with
+// themselves and with overall heap sizings.
+// In the absence of explicitly set command line flags, policies
+// such as the use of NewRatio are used to size the generation.
 void GenCollectorPolicy::initialize_size_info() {
   CollectorPolicy::initialize_size_info();
 
-  // Minimum sizes of the generations may be different than
-  // the initial sizes.
-  if (!FLAG_IS_DEFAULT(NewSize)) {
-    _min_gen0_size = NewSize;
+  // min_alignment() is used for alignment within a generation.
+  // There is additional alignment done downstream for some
+  // collectors that sometimes causes unwanted rounding up of
+  // generation sizes.
+
+  // Determine maximum size of gen0
+
+  size_t max_new_size = 0;
+  if (FLAG_IS_CMDLINE(MaxNewSize)) {
+    if (MaxNewSize < min_alignment()) {
+      max_new_size = min_alignment();
+    } else if (MaxNewSize >= max_heap_byte_size()) {
+      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
+                                     min_alignment());
+      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
+        "greater than the entire heap (" SIZE_FORMAT "k).  A "
+        "new generation size of " SIZE_FORMAT "k will be used.",
+        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
+    } else {
+      max_new_size = align_size_down(MaxNewSize, min_alignment());
+    }
+
+  // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
+  // specially at this point to just use an ergonomically set
+  // MaxNewSize to set max_new_size.  For cases with small
+  // heaps such a policy often did not work because the MaxNewSize
+  // was larger than the entire heap.  The interpretation given
+  // to ergonomically set flags is that the flags are set
+  // by different collectors for their own special needs but
+  // are not allowed to badly shape the heap.  This allows the
+  // different collectors to decide what's best for themselves
+  // without having to factor in the overall heap shape.  It
+  // can be the case in the future that the collectors would
+  // only make "wise" ergonomics choices and this policy could
+  // just accept those choices.  The choices currently made are
+  // not always "wise".
   } else {
-    _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1),
-                                     min_alignment());
-    // We bound the minimum size by NewSize below (since it historically
+    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
+    // Bound the maximum size by NewSize below (since it historically
     // would have been NewSize and because the NewRatio calculation could
     // yield a size that is too small) and bound it by MaxNewSize above.
-    // This is not always best.  The NewSize calculated by CMS (which has
-    // a fixed minimum of 16m) can sometimes be "too" large.  Consider
-    // the case where -Xmx32m.  The CMS calculated NewSize would be about
-    // half the entire heap which seems too large.  But the counter
-    // example is seen when the client defaults for NewRatio are used.
-    // An initial young generation size of 640k was observed
-    // with -Xmx128m -XX:MaxNewSize=32m when NewSize was not used
-    // as a lower bound as with
-    // _min_gen0_size = MIN2(_min_gen0_size, MaxNewSize);
-    // and 640k seemed too small a young generation.
-    _min_gen0_size = MIN2(MAX2(_min_gen0_size, NewSize), MaxNewSize);
+    // Ergonomics plays a part here by previously calculating the desired
+    // NewSize and MaxNewSize.
+    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
+  }
+  assert(max_new_size > 0, "All paths should set max_new_size");
+
+  // Given the maximum gen0 size, determine the initial and
+  // minimum sizes.
+
+  if (max_heap_byte_size() == min_heap_byte_size()) {
+    // The maximum and minimum heap sizes are the same so
+    // the generation's minimum and initial sizes must be the
+    // same as its maximum.
+    set_min_gen0_size(max_new_size);
+    set_initial_gen0_size(max_new_size);
+    set_max_gen0_size(max_new_size);
+  } else {
+    size_t desired_new_size = 0;
+    if (!FLAG_IS_DEFAULT(NewSize)) {
+      // If NewSize is set ergonomically (for example by cms), it
+      // would make sense to use it.  If it is used, also use it
+      // to set the initial size.  Although there is no reason
+      // the minimum size and the initial size have to be the same,
+      // the current implementation gets into trouble during the calculation
+      // of the tenured generation sizes if they are different.
+      // Note that this makes the initial size and the minimum size
+      // generally small compared to the NewRatio calculation.
+      _min_gen0_size = NewSize;
+      desired_new_size = NewSize;
+      max_new_size = MAX2(max_new_size, NewSize);
+    } else {
+      // For the case where NewSize is the default, use NewRatio
+      // to size the minimum and initial generation sizes.
+      // Use the default NewSize as the floor for these values.  If
+      // NewRatio is overly large, the resulting sizes can be too
+      // small.
+      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
+                          NewSize);
+      desired_new_size =
+        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
+             NewSize);
+    }
+
+    assert(_min_gen0_size > 0, "Sanity check");
+    set_initial_gen0_size(desired_new_size);
+    set_max_gen0_size(max_new_size);
+
+    // At this point the desirable initial and minimum sizes have been
+    // determined without regard to the maximum sizes.
+
+    // Bound the sizes by the corresponding overall heap sizes.
+    set_min_gen0_size(
+      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
+    set_initial_gen0_size(
+      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
+    set_max_gen0_size(
+      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
+
+    // At this point all three sizes have been checked against the
+    // maximum sizes but have not been checked for consistency
+    // among the three.
+
+    // Final check min <= initial <= max
+    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
+    set_initial_gen0_size(
+      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
+    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
   }
 
-  // Parameters are valid, compute area sizes.
-  size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1),
-                                        min_alignment());
-  max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize);
+  if (PrintGCDetails && Verbose) {
+    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
+      min_gen0_size(), initial_gen0_size(), max_gen0_size());
+  }
+}
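
The three clamps at the end repair any remaining inconsistency among the gen0 sizes. A tiny standalone sketch of that exact ordering (MIN2/MAX2 are trivial stand-ins here) shows a deliberately broken triple being forced into min <= initial <= max:

#include <stdio.h>
#include <stddef.h>

template <class T> static T MIN2(T a, T b) { return a < b ? a : b; }
template <class T> static T MAX2(T a, T b) { return a > b ? a : b; }

int main() {
  // Deliberately inconsistent inputs: min > max and initial < min.
  size_t min_gen0 = 96, initial_gen0 = 16, max_gen0 = 64;

  // Same order as the final checks in initialize_size_info():
  min_gen0     = MIN2(min_gen0, max_gen0);                      // 64
  initial_gen0 = MAX2(MIN2(initial_gen0, max_gen0), min_gen0);  // 64
  min_gen0     = MIN2(min_gen0, initial_gen0);                  // 64

  printf("min=%lu initial=%lu max=%lu\n", (unsigned long)min_gen0,
         (unsigned long)initial_gen0, (unsigned long)max_gen0);
  return 0;
}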
 
-  // desired_new_size is used to set the initial size.  The
-  // initial size must be greater than the minimum size.
-  size_t desired_new_size =
-    align_size_down(_initial_heap_byte_size / (NewRatio+1),
-                  min_alignment());
+// Call this method during the sizing of gen1 to make
+// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
+// the most freedom in sizing because it is sized before the
+// policy for gen1 is applied.  Once gen1 policies have been applied,
+// there may be conflicts in the shape of the heap and this method
+// is used to make the needed adjustments.  The application of the
+// policies could be more sophisticated (iterative for example) but
+// keeping it simple also seems a worthwhile goal.
+bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
+                                                     size_t* gen1_size_ptr,
+                                                     size_t heap_size,
+                                                     size_t min_gen1_size) {
+  bool result = false;
+  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
+    if (((*gen0_size_ptr + OldSize) > heap_size) &&
+       (heap_size - min_gen1_size) >= min_alignment()) {
+      // Adjust gen0 down to accommodate min_gen1_size (OldSize at the callers)
+      *gen0_size_ptr = heap_size - min_gen1_size;
+      *gen0_size_ptr =
+        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
+             min_alignment());
+      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
+      result = true;
+    } else {
+      *gen1_size_ptr = heap_size - *gen0_size_ptr;
+      *gen1_size_ptr =
+        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
+                       min_alignment());
+    }
+  }
+  return result;
+}
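
A behavioral sketch of the adjustment above, with illustrative numbers (alignment handling is omitted and the helper below is a simplification, not the HotSpot function): when an explicitly set OldSize plus the proposed gen0 size overflows the heap, gen0 is cut back; otherwise gen1 absorbs the shrinkage.

#include <stdio.h>
#include <stddef.h>

// Simplified adjust_gen0_sizes(); old_size plays the role of OldSize,
// which the callers pass as the gen1 minimum.
static bool adjust_gen0(size_t* gen0, size_t* gen1, size_t heap,
                        size_t old_size) {
  if (*gen0 + *gen1 <= heap) return false;  // shape already consistent
  if (*gen0 + old_size > heap) {
    *gen0 = heap - old_size;                // explicit OldSize wins
    return true;
  }
  *gen1 = heap - *gen0;                     // otherwise gen1 gives way
  return false;
}

int main() {
  size_t gen0 = 20 * 1024, gen1 = 16 * 1024, heap = 32 * 1024;
  bool adjusted = adjust_gen0(&gen0, &gen1, heap, /* OldSize */ 16 * 1024);
  printf("adjusted=%d gen0=%luK gen1=%luK\n", (int)adjusted,
         (unsigned long)(gen0 / 1024), (unsigned long)(gen1 / 1024));
  // Prints: adjusted=1 gen0=16K gen1=16K
  return 0;
}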
 
-  size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size);
-
-  _initial_gen0_size = new_size;
-  _max_gen0_size = max_new_size;
-}
+// Minimum sizes of the generations may be different than
+// the initial sizes.  An inconsistency is permitted here
+// between the total generation size specified explicitly on the
+// command line via OldSize and NewSize and a command line
+// specification of -Xms.  Issue a warning but allow the
+// values to pass.
 
 void TwoGenerationCollectorPolicy::initialize_size_info() {
   GenCollectorPolicy::initialize_size_info();
 
-  // Minimum sizes of the generations may be different than
-  // the initial sizes.  An inconsistently is permitted here
-  // in the total size that can be specified explicitly by
-  // command line specification of OldSize and NewSize and
-  // also a command line specification of -Xms.  Issue a warning
-  // but allow the values to pass.
-  if (!FLAG_IS_DEFAULT(OldSize)) {
-    _min_gen1_size = OldSize;
+  // At this point the minimum, initial and maximum sizes
+  // of the overall heap and of gen0 have been determined.
+  // The maximum gen1 size can be determined from the maximum gen0
+  // and maximum heap size since no explicit flag exists
+  // for setting the gen1 maximum.
+  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
+  _max_gen1_size =
+    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
+         min_alignment());
+  // If no explicit command line flag has been set for the
+  // gen1 size, use what is left for gen1.
+  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
+    // The user has not specified any value or ergonomics
+    // has chosen a value (which may or may not be consistent
+    // with the overall heap size).  In either case make
+    // the minimum, maximum and initial sizes consistent
+    // with the gen0 sizes and the overall heap sizes.
+    assert(min_heap_byte_size() > _min_gen0_size,
+      "gen0 has an unexpected minimum size");
+    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
+    set_min_gen1_size(
+      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
+           min_alignment()));
+    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
+    set_initial_gen1_size(
+      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
+           min_alignment()));
+
+  } else {
+    // It's been explicitly set on the command line.  Use the
+    // OldSize and then determine the consequences.
+    set_min_gen1_size(OldSize);
+    set_initial_gen1_size(OldSize);
+
+    // If the user has explicitly set an OldSize that is inconsistent
+    // with other command line flags, issue a warning.
     // The generation minimums and the overall heap minimum should
     // be within one heap alignment.
-    if ((_min_gen1_size + _min_gen0_size + max_alignment()) <
-         _min_heap_byte_size) {
+    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
+           min_heap_byte_size()) {
       warning("Inconsistency between minimum heap size and minimum "
-        "generation sizes: using min heap = " SIZE_FORMAT,
-        _min_heap_byte_size);
+          "generation sizes: using minimum heap = " SIZE_FORMAT,
+          min_heap_byte_size());
+    }
+    if (OldSize > _max_gen1_size) {
+      warning("Inconsistency between maximum heap size and maximum "
+          "generation sizes: using maximum heap = " SIZE_FORMAT
+          "; -XX:OldSize flag is being ignored",
+          max_heap_byte_size());
+    }
+    // If there is an inconsistency between OldSize and the minimum and/or
+    // initial size of gen0, OldSize wins since it was explicitly set.
+    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
+                          min_heap_byte_size(), OldSize)) {
+      if (PrintGCDetails && Verbose) {
+        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
+              min_gen0_size(), initial_gen0_size(), max_gen0_size());
+      }
     }
-  } else {
-    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
+    // Initial size
+    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
+                         initial_heap_byte_size(), OldSize)) {
+      if (PrintGCDetails && Verbose) {
+        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
+          min_gen0_size(), initial_gen0_size(), max_gen0_size());
+      }
+    }
   }
+  // Enforce the maximum gen1 size.
+  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
 
-  _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
-  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
+  // Check that min gen1 <= initial gen1 <= max gen1
+  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
+  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
+
+  if (PrintGCDetails && Verbose) {
+    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
+      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
+      min_gen1_size(), initial_gen1_size(), max_gen1_size());
+  }
 }
 
 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -82,8 +82,11 @@
   size_t max_alignment()                       { return _max_alignment; }
 
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
+  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
+  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
+  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
 
   enum Name {
     CollectorPolicyKind,
@@ -182,8 +185,24 @@
   // compute max heap alignment
   size_t compute_max_alignment();
 
+  // Scale the base_size by NewRatio according to
+  //     result = base_size / (NewRatio + 1)
+  // and align by min_alignment()
+  size_t scale_by_NewRatio_aligned(size_t base_size);
+
+  // Bound the value by the given maximum minus the
+  // min_alignment.
+  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 
  public:
+  // Accessors
+  size_t min_gen0_size() { return _min_gen0_size; }
+  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
+  size_t initial_gen0_size() { return _initial_gen0_size; }
+  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
+  size_t max_gen0_size() { return _max_gen0_size; }
+  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
+
   virtual int number_of_generations() = 0;
 
   virtual GenerationSpec **generations()       {
@@ -236,6 +255,14 @@
   void initialize_generations()                { ShouldNotReachHere(); }
 
  public:
+  // Accessors
+  size_t min_gen1_size() { return _min_gen1_size; }
+  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
+  size_t initial_gen1_size() { return _initial_gen1_size; }
+  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
+  size_t max_gen1_size() { return _max_gen1_size; }
+  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
+
   // Inherited methods
   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 
@@ -246,6 +273,10 @@
   virtual CollectorPolicy::Name kind() {
     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
   }
+
+  // Returns true if gen0 sizes were adjusted
+  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
+                               size_t heap_size, size_t min_gen1_size);
 };
 
 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
--- a/hotspot/src/share/vm/memory/genRemSet.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -91,8 +91,15 @@
   virtual void verify() = 0;
 
   // Verify that the remembered set has no entries for
-  // the heap interval denoted by mr.
-  virtual void verify_empty(MemRegion mr) = 0;
+  // the heap interval denoted by mr.  If there are any
+  // alignment constraints on the remembered set, only the
+  // part of the region that is aligned is checked.
+  //
+  //   alignment boundaries
+  //   +--------+-------+--------+-------+
+  //         [ region mr              )
+  //            [ part checked   )
+  virtual void verify_aligned_region_empty(MemRegion mr) = 0;
 
   // If appropriate, print some information about the remset on "tty".
   virtual void print() {}
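
The aligned sub-region in the diagram is obtained by rounding the region's start up and its end down to alignment boundaries. A sketch under assumed names (the 512-byte alignment and the align_up/align_down helpers are illustrative, not the remembered-set API):

#include <stdio.h>
#include <stdint.h>

// Illustrative card-size alignment; a real remembered set takes this
// from its card table configuration.
static const uintptr_t kAlign = 512;

static uintptr_t align_up(uintptr_t p)   { return (p + kAlign - 1) & ~(kAlign - 1); }
static uintptr_t align_down(uintptr_t p) { return p & ~(kAlign - 1); }

int main() {
  uintptr_t start = 1000, end = 4000;         // unaligned region [start, end)
  uintptr_t checked_start = align_up(start);  // 1024
  uintptr_t checked_end   = align_down(end);  // 3584
  if (checked_start < checked_end) {
    printf("part checked: [%lu, %lu)\n",
           (unsigned long)checked_start, (unsigned long)checked_end);
  } else {
    printf("region smaller than one aligned unit; nothing to check\n");
  }
  return 0;
}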
--- a/hotspot/src/share/vm/memory/heapInspection.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -65,7 +65,7 @@
       name = "<no name>";
   }
   // simplify the formatting (ILP32 vs LP64) - always cast the numbers to 64-bit
-  st->print_cr("%13" FORMAT64_MODIFIER "d  %13" FORMAT64_MODIFIER "u  %s",
+  st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
                (jlong)  _instance_count,
                (julong) _instance_words * HeapWordSize,
                name);
@@ -80,7 +80,10 @@
     elt = elt->next();
   }
   elt = new KlassInfoEntry(k, list());
-  set_list(elt);
+  // We may be out of space to allocate the new entry.
+  if (elt != NULL) {
+    set_list(elt);
+  }
   return elt;
 }
 
@@ -103,21 +106,25 @@
 }
 
 KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
-  _size = size;
+  _size = 0;
   _ref = ref;
-  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, _size);
-
-  for (int index = 0; index < _size; index++) {
-    _buckets[index].initialize();
+  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size);
+  if (_buckets != NULL) {
+    _size = size;
+    for (int index = 0; index < _size; index++) {
+      _buckets[index].initialize();
+    }
   }
 }
 
 KlassInfoTable::~KlassInfoTable() {
-  for (int index = 0; index < _size; index++) {
-    _buckets[index].empty();
+  if (_buckets != NULL) {
+    for (int index = 0; index < _size; index++) {
+      _buckets[index].empty();
+    }
+    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
+    _size = 0;
   }
-  FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets);
-  _size = 0;
 }
 
 uint KlassInfoTable::hash(klassOop p) {
@@ -127,19 +134,32 @@
 
 KlassInfoEntry* KlassInfoTable::lookup(const klassOop k) {
   uint         idx = hash(k) % _size;
+  assert(_buckets != NULL, "Allocation failure should have been caught");
   KlassInfoEntry*  e   = _buckets[idx].lookup(k);
-  assert(k == e->klass(), "must be equal");
+  // Lookup may fail if this is a new klass for which we
+  // could not allocate space for a new entry.
+  assert(e == NULL || k == e->klass(), "must be equal");
   return e;
 }
 
-void KlassInfoTable::record_instance(const oop obj) {
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::record_instance(const oop obj) {
   klassOop      k = obj->klass();
   KlassInfoEntry* elt = lookup(k);
-  elt->set_count(elt->count() + 1);
-  elt->set_words(elt->words() + obj->size());
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + 1);
+    elt->set_words(elt->words() + obj->size());
+    return true;
+  } else {
+    return false;
+  }
 }
 
 void KlassInfoTable::iterate(KlassInfoClosure* cic) {
+  assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
   for (int index = 0; index < _size; index++) {
     _buckets[index].iterate(cic);
   }
@@ -176,7 +196,7 @@
     total += elements()->at(i)->count();
     totalw += elements()->at(i)->words();
   }
-  st->print_cr("Total %13" FORMAT64_MODIFIER "d  %13" FORMAT64_MODIFIER "u",
+  st->print_cr("Total " INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13),
                total, totalw * HeapWordSize);
 }
 
@@ -199,12 +219,18 @@
 class RecordInstanceClosure : public ObjectClosure {
  private:
   KlassInfoTable* _cit;
+  size_t _missed_count;
  public:
-  RecordInstanceClosure(KlassInfoTable* cit) : _cit(cit) {}
+  RecordInstanceClosure(KlassInfoTable* cit) :
+    _cit(cit), _missed_count(0) {}
 
   void do_object(oop obj) {
-    _cit->record_instance(obj);
+    if (!_cit->record_instance(obj)) {
+      _missed_count++;
+    }
   }
+
+  size_t missed_count() { return _missed_count; }
 };
 
 void HeapInspection::heap_inspection(outputStream* st) {
@@ -230,21 +256,32 @@
       ShouldNotReachHere(); // Unexpected heap kind for this op
   }
   // Collect klass instance info
-
-  // Iterate over objects in the heap
   KlassInfoTable cit(KlassInfoTable::cit_size, ref);
-  RecordInstanceClosure ric(&cit);
-  Universe::heap()->object_iterate(&ric);
+  if (!cit.allocation_failed()) {
+    // Iterate over objects in the heap
+    RecordInstanceClosure ric(&cit);
+    Universe::heap()->object_iterate(&ric);
 
-  // Sort and print klass instance info
-  KlassInfoHisto histo("\n"
-                   " num     #instances         #bytes  class name\n"
-                   "----------------------------------------------",
-                   KlassInfoHisto::histo_initial_size);
-  HistoClosure hc(&histo);
-  cit.iterate(&hc);
-  histo.sort();
-  histo.print_on(st);
+    // Report if certain classes are not counted because of
+    // running out of C-heap for the histogram.
+    size_t missed_count = ric.missed_count();
+    if (missed_count != 0) {
+      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+                   " total instances in data below",
+                   missed_count);
+    }
+    // Sort and print klass instance info
+    KlassInfoHisto histo("\n"
+                     " num     #instances         #bytes  class name\n"
+                     "----------------------------------------------",
+                     KlassInfoHisto::histo_initial_size);
+    HistoClosure hc(&histo);
+    cit.iterate(&hc);
+    histo.sort();
+    histo.print_on(st);
+  } else {
+    st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
+  }
   st->flush();
 
   if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {
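
The heapInspection changes above all follow one fail-soft pattern: the constructor records an allocation failure instead of crashing, lookups tolerate the resulting NULL, and the caller tallies and reports what was missed. A condensed sketch of the pattern (the names are illustrative, not the HotSpot classes):

#include <stdio.h>
#include <stdlib.h>

// Condensed fail-soft table in the style of KlassInfoTable.
struct CountTable {
  int* buckets;
  int  size;

  explicit CountTable(int n)
      : buckets((int*)malloc((size_t)n * sizeof(int))), size(0) {
    if (buckets != NULL) {            // only claim the size we actually got
      size = n;
      for (int i = 0; i < n; i++) buckets[i] = 0;
    }
  }
  ~CountTable() { free(buckets); }

  bool allocation_failed() const { return buckets == NULL; }

  // Returns false instead of dereferencing NULL after a failed allocation.
  bool record(unsigned key) {
    if (buckets == NULL) return false;
    buckets[key % (unsigned)size]++;
    return true;
  }
};

int main() {
  CountTable table(16);
  if (table.allocation_failed()) {
    printf("WARNING: Ran out of C-heap; histogram not generated\n");
    return 1;
  }
  size_t missed = 0;
  if (!table.record(42u)) missed++;   // the caller counts what it lost
  printf("bucket[%u]=%d missed=%lu\n", 42u % 16u,
         table.buckets[42u % 16u], (unsigned long)missed);
  return 0;
}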
--- a/hotspot/src/share/vm/memory/heapInspection.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/heapInspection.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -98,8 +98,9 @@
   };
   KlassInfoTable(int size, HeapWord* ref);
   ~KlassInfoTable();
-  void record_instance(const oop obj);
+  bool record_instance(const oop obj);
   void iterate(KlassInfoClosure* cic);
+  bool allocation_failed() { return _buckets == NULL; }
 };
 
 class KlassInfoHisto : public StackObj {
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -85,7 +85,7 @@
   ReferenceProcessor* rp =
     new ReferenceProcessor(span, atomic_discovery,
                            mt_discovery, mt_degree,
-                           mt_processing);
+                           mt_processing && (parallel_gc_threads > 0));
   if (rp == NULL) {
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -409,10 +409,11 @@
 void TenuredGeneration::verify_alloc_buffers_clean() {
   if (UseParNewGC) {
     for (uint i = 0; i < ParallelGCThreads; i++) {
-      _rs->verify_empty(_alloc_buffers[i]->range());
+      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
     }
   }
 }
+
 #else  // SERIALGC
 void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
 void TenuredGeneration::verify_alloc_buffers_clean() {}
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -924,10 +924,18 @@
 void Arguments::set_parnew_gc_flags() {
   assert(!UseSerialGC && !UseParallelGC, "control point invariant");
 
+  // Turn off AdaptiveSizePolicy by default for parnew until it is
+  // complete.
+  if (UseParNewGC &&
+      FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
+    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+  }
+
   if (FLAG_IS_DEFAULT(UseParNewGC) && ParallelGCThreads > 1) {
     FLAG_SET_DEFAULT(UseParNewGC, true);
   } else if (UseParNewGC && ParallelGCThreads == 0) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
+    FLAG_SET_DEFAULT(ParallelGCThreads,
+                     Abstract_VM_Version::parallel_worker_threads());
     if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
       FLAG_SET_DEFAULT(UseParNewGC, false);
     }
@@ -956,25 +964,6 @@
   }
 }
 
-// CAUTION: this code is currently shared by UseParallelGC, UseParNewGC and
-// UseconcMarkSweepGC. Further tuning of individual collectors might
-// dictate refinement on a per-collector basis.
-int Arguments::nof_parallel_gc_threads() {
-  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
-    // For very large machines, there are diminishing returns
-    // for large numbers of worker threads.  Instead of
-    // hogging the whole system, use 5/8ths of a worker for every
-    // processor after the first 8.  For example, on a 72 cpu
-    // machine use 8 + (72 - 8) * (5/8) == 48 worker threads.
-    // This is just a start and needs further tuning and study in
-    // Tiger.
-    int ncpus = os::active_processor_count();
-    return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8);
-  } else {
-    return ParallelGCThreads;
-  }
-}
-
 // Adjust some sizes to suit CMS and/or ParNew needs; these work well on
 // sparc/solaris for certain applications, but would gain from
 // further optimization and tuning efforts, and would almost
@@ -984,26 +973,24 @@
     return;
   }
 
+  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
+
   // If we are using CMS, we prefer to UseParNewGC,
   // unless explicitly forbidden.
-  if (UseConcMarkSweepGC && !UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) {
-    FLAG_SET_DEFAULT(UseParNewGC, true);
+  if (!UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) {
+    FLAG_SET_ERGO(bool, UseParNewGC, true);
   }
 
   // Turn off AdaptiveSizePolicy by default for cms until it is
-  // complete.  Also turn it off in general if the
-  // parnew collector has been selected.
-  if ((UseConcMarkSweepGC || UseParNewGC) &&
-      FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
+  // complete.
+  if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
     FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
   }
 
   // In either case, adjust ParallelGCThreads and/or UseParNewGC
   // as needed.
-  set_parnew_gc_flags();
-
-  if (!UseConcMarkSweepGC) {
-    return;
+  if (UseParNewGC) {
+    set_parnew_gc_flags();
   }
 
   // Now make adjustments for CMS
@@ -1013,7 +1000,7 @@
   intx tenuring_default;
   if (CMSUseOldDefaults) {  // old defaults: "old" as of 6.0
     if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) {
-      FLAG_SET_DEFAULT(CMSYoungGenPerWorker, 4*M);
+      FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M);
     }
     young_gen_per_worker = 4*M;
     new_ratio = (intx)15;
@@ -1038,16 +1025,20 @@
   // for "short" pauses ~ 4M*ParallelGCThreads
   if (FLAG_IS_DEFAULT(MaxNewSize)) {  // MaxNewSize not set at command-line
     if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_DEFAULT(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
     } else {
-      FLAG_SET_DEFAULT(MaxNewSize, preferred_max_new_size);
+      FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
     }
+    if (PrintGCDetails && Verbose) {
+      // Too early to use gclog_or_tty
+      tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
+    }
   }
   // Unless explicitly requested otherwise, prefer a large
   // Old to Young gen size so as to shift the collection load
   // to the old generation concurrent collector
   if (FLAG_IS_DEFAULT(NewRatio)) {
-    FLAG_SET_DEFAULT(NewRatio, MAX2(NewRatio, new_ratio));
+    FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio));
 
     size_t min_new  = align_size_up(ScaleForWordSize(min_new_default), os::vm_page_size());
     size_t prev_initial_size = initial_heap_size();
@@ -1065,19 +1056,34 @@
     size_t max_heap = align_size_down(MaxHeapSize,
                                       CardTableRS::ct_max_alignment_constraint());
 
+    if (PrintGCDetails && Verbose) {
+      // Too early to use gclog_or_tty
+      tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
+           " initial_heap_size:  " SIZE_FORMAT
+           " max_heap: " SIZE_FORMAT,
+           min_heap_size(), initial_heap_size(), max_heap);
+    }
     if (max_heap > min_new) {
       // Unless explicitly requested otherwise, make young gen
       // at least min_new, and at most preferred_max_new_size.
       if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_DEFAULT(NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_DEFAULT(NewSize, MIN2(preferred_max_new_size, NewSize));
+        FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
+        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
+        if (PrintGCDetails && Verbose) {
+          // Too early to use gclog_or_tty
+          tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
+        }
       }
       // Unless explicitly requested otherwise, size old gen
       // so that it's at least 3X of NewSize to begin with;
       // later NewRatio will decide how it grows; see above.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
-          FLAG_SET_DEFAULT(OldSize, MIN2(3*NewSize,  max_heap - NewSize));
+          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize,  max_heap - NewSize));
+          if (PrintGCDetails && Verbose) {
+            // Too early to use gclog_or_tty
+            tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
+          }
         }
       }
     }
@@ -1086,14 +1092,14 @@
   // promote all objects surviving "tenuring_default" scavenges.
   if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
       FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_default);
+    FLAG_SET_ERGO(intx, MaxTenuringThreshold, tenuring_default);
   }
   // If we decided above (or user explicitly requested)
   // `promote all' (via MaxTenuringThreshold := 0),
   // prefer minuscule survivor spaces so as not to waste
   // space for (non-existent) survivors
   if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_DEFAULT(SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
+    FLAG_SET_ERGO(intx, SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
   }
   // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
   // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
@@ -1102,7 +1108,11 @@
   // See CR 6362902.
   if (!FLAG_IS_DEFAULT(OldPLABSize)) {
     if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
-      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
+      // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
+      // is.  In this situation let CMSParPromoteBlocksToClaim follow
+      // the value (either from the command line or ergonomics) of
+      // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
+      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
     }
     else {
       // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
@@ -1147,17 +1157,11 @@
         FLAG_IS_DEFAULT(UseParallelGC)) {
       if (should_auto_select_low_pause_collector()) {
         FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
-        set_cms_and_parnew_gc_flags();
       } else {
         FLAG_SET_ERGO(bool, UseParallelGC, true);
       }
       no_shared_spaces();
     }
-
-    // This is here because the parallel collector could
-    // have been selected so this initialization should
-    // still be done.
-    set_parallel_gc_flags();
   }
 }
 
@@ -1170,6 +1174,9 @@
   // If no heap maximum was requested explicitly, use some reasonable fraction
   // of the physical memory, up to a maximum of 1GB.
   if (UseParallelGC) {
+    FLAG_SET_ERGO(uintx, ParallelGCThreads,
+                  Abstract_VM_Version::parallel_worker_threads());
+
     if (FLAG_IS_DEFAULT(MaxHeapSize)) {
       const uint64_t reasonable_fraction =
         os::physical_memory() / DefaultMaxRAMFraction;
@@ -1227,12 +1234,13 @@
 
     if (UseParallelOldGC) {
       // Par compact uses lower default values since they are treated as
-      // minimums.
+      // minimums.  These are different defaults because of the different
+      // interpretation and are not ergonomically set.
       if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
-        MarkSweepDeadRatio = 1;
+        FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
       }
       if (FLAG_IS_DEFAULT(PermMarkSweepDeadRatio)) {
-        PermMarkSweepDeadRatio = 5;
+        FLAG_SET_DEFAULT(PermMarkSweepDeadRatio, 5);
       }
     }
   }
@@ -1312,6 +1320,31 @@
           UseParallelOldGC));
 }
 
+// Check consistency of GC selection
+bool Arguments::check_gc_consistency() {
+  bool status = true;
+  // Ensure that the user has not selected conflicting sets
+  // of collectors. [Note: this check is merely a user convenience;
+  // collectors override each other so that only a non-conflicting
+  // set is selected; however what the user gets is not what they
+  // may have expected from the combination they asked for. It's
+  // better to reduce user confusion by not allowing them to
+  // select conflicting combinations.]
+  uint i = 0;
+  if (UseSerialGC)                       i++;
+  if (UseConcMarkSweepGC || UseParNewGC) i++;
+  if (UseParallelGC || UseParallelOldGC) i++;
+  if (i > 1) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Conflicting collector combinations in option list; "
+                "please refer to the release notes for the combinations "
+                "allowed\n");
+    status = false;
+  }
+
+  return status;
+}
+
 // Check the consistency of vm_init_args
 bool Arguments::check_vm_args_consistency() {
   // Method for adding checks for flag consistency.
@@ -1354,14 +1387,14 @@
     status = false;
   }
 
-  status &= verify_percentage(MaxLiveObjectEvacuationRatio,
+  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
                               "MaxLiveObjectEvacuationRatio");
-  status &= verify_percentage(AdaptiveSizePolicyWeight,
+  status = status && verify_percentage(AdaptiveSizePolicyWeight,
                               "AdaptiveSizePolicyWeight");
-  status &= verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
-  status &= verify_percentage(ThresholdTolerance, "ThresholdTolerance");
-  status &= verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
-  status &= verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
+  status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
+  status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance");
+  status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
+  status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
 
   if (MinHeapFreeRatio > MaxHeapFreeRatio) {
     jio_fprintf(defaultStream::error_stream(),
@@ -1377,14 +1410,14 @@
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }
 
-  status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
-  status &= verify_percentage(GCTimeLimit, "GCTimeLimit");
+  status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
+  status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
   if (GCTimeLimit == 100) {
     // Turn off gc-overhead-limit-exceeded checks
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
+  status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
 
   // Check user specified sharing option conflict with Parallel GC
   bool cannot_share = (UseConcMarkSweepGC || UseParallelGC ||
@@ -1402,24 +1435,7 @@
     }
   }
 
-  // Ensure that the user has not selected conflicting sets
-  // of collectors. [Note: this check is merely a user convenience;
-  // collectors over-ride each other so that only a non-conflicting
-  // set is selected; however what the user gets is not what they
-  // may have expected from the combination they asked for. It's
-  // better to reduce user confusion by not allowing them to
-  // select conflicting combinations.
-  uint i = 0;
-  if (UseSerialGC)                       i++;
-  if (UseConcMarkSweepGC || UseParNewGC) i++;
-  if (UseParallelGC || UseParallelOldGC) i++;
-  if (i > 1) {
-    jio_fprintf(defaultStream::error_stream(),
-                "Conflicting collector combinations in option list; "
-                "please refer to the release notes for the combinations "
-                "allowed\n");
-    status = false;
-  }
+  status = status && check_gc_consistency();
 
   if (_has_alloc_profile) {
     if (UseParallelGC || UseParallelOldGC) {
@@ -1451,15 +1467,15 @@
                   "allocation buffers\n(-XX:+UseTLAB).\n");
       status = false;
     } else {
-      status &= verify_percentage(CMSIncrementalDutyCycle,
+      status = status && verify_percentage(CMSIncrementalDutyCycle,
                                   "CMSIncrementalDutyCycle");
-      status &= verify_percentage(CMSIncrementalDutyCycleMin,
+      status = status && verify_percentage(CMSIncrementalDutyCycleMin,
                                   "CMSIncrementalDutyCycleMin");
-      status &= verify_percentage(CMSIncrementalSafetyFactor,
+      status = status && verify_percentage(CMSIncrementalSafetyFactor,
                                   "CMSIncrementalSafetyFactor");
-      status &= verify_percentage(CMSIncrementalOffset,
+      status = status && verify_percentage(CMSIncrementalOffset,
                                   "CMSIncrementalOffset");
-      status &= verify_percentage(CMSExpAvgFactor,
+      status = status && verify_percentage(CMSExpAvgFactor,
                                   "CMSExpAvgFactor");
       // If it was not set on the command line, set
       // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
@@ -2064,7 +2080,8 @@
 
       // Enable parallel GC and adaptive generation sizing
       FLAG_SET_CMDLINE(bool, UseParallelGC, true);
-      FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
+      FLAG_SET_DEFAULT(ParallelGCThreads,
+                       Abstract_VM_Version::parallel_worker_threads());
 
       // Encourage steady state memory management
       FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100);
@@ -2451,15 +2468,25 @@
   no_shared_spaces();
 #endif // KERNEL
 
-  // Set some flags for ParallelGC if needed.
-  set_parallel_gc_flags();
-
-  // Set some flags for CMS and/or ParNew collectors, as needed.
-  set_cms_and_parnew_gc_flags();
-
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
+  // Check the GC selections again.
+  if (!check_gc_consistency()) {
+    return JNI_EINVAL;
+  }
+
+  if (UseParallelGC || UseParallelOldGC) {
+    // Set some flags for ParallelGC if needed.
+    set_parallel_gc_flags();
+  } else if (UseConcMarkSweepGC) {
+    // Set some flags for CMS
+    set_cms_and_parnew_gc_flags();
+  } else if (UseParNewGC) {
+    // Set some flags for ParNew
+    set_parnew_gc_flags();
+  }
+
 #ifdef SERIALGC
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // SERIALGC
@@ -2479,6 +2506,12 @@
     CommandLineFlags::printSetFlags();
   }
 
+#ifdef ASSERT
+  if (PrintFlagsFinal) {
+    CommandLineFlags::printFlags();
+  }
+#endif
+
   return JNI_OK;
 }
 
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -291,8 +291,6 @@
   static bool _CIDynamicCompilePriority;
   static intx _Tier2CompileThreshold;
 
-  // GC processing
-  static int nof_parallel_gc_threads();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -385,6 +383,8 @@
  public:
   // Parses the arguments
   static jint parse(const JavaVMInitArgs* args);
+  // Check for consistency in the selection of the garbage collector.
+  static bool check_gc_consistency();
   // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
   // Used by os_solaris
--- a/hotspot/src/share/vm/runtime/globals.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -205,6 +205,18 @@
   return (f->origin == DEFAULT);
 }
 
+bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
+  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
+  Flag* f = &Flag::flags[flag];
+  return (f->origin == ERGONOMIC);
+}
+
+bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
+  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
+  Flag* f = &Flag::flags[flag];
+  return (f->origin == COMMAND_LINE);
+}
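
The new predicates work because every Flag in the table carries the origin of its current value alongside the value itself. A toy sketch of the idea (a deliberate simplification with made-up names, not the real Flag table):

#include <stdio.h>

enum FlagValueOrigin { DEFAULT, COMMAND_LINE, ERGONOMIC };

// Toy flag: the value travels with a record of where it came from.
struct ToyFlag {
  long            value;
  FlagValueOrigin origin;
};

static void set_cmdline(ToyFlag* f, long v) { f->value = v; f->origin = COMMAND_LINE; }
static void set_ergo(ToyFlag* f, long v)    { f->value = v; f->origin = ERGONOMIC; }

int main() {
  ToyFlag max_new_size = { 0L, DEFAULT };
  set_ergo(&max_new_size, 64L);      // ergonomics picks a value first
  set_cmdline(&max_new_size, 128L);  // an explicit setting overrides it
  // is_default()/is_ergo()/is_cmdline() reduce to an origin comparison.
  printf("is_default=%d is_ergo=%d is_cmdline=%d\n",
         max_new_size.origin == DEFAULT,
         max_new_size.origin == ERGONOMIC,
         max_new_size.origin == COMMAND_LINE);
  return 0;
}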
+
 bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
   Flag* result = Flag::find_flag((char*)name, strlen(name));
   if (result == NULL) return false;
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -1794,6 +1794,9 @@
           "number of times a GC thread (minus the coordinator) "            \
           "will sleep while yielding before giving up and resuming GC")     \
                                                                             \
+  notproduct(bool, PrintFlagsFinal, false,                                  \
+          "Print all command line flags after argument processing")         \
+                                                                            \
   /* gc tracing */                                                          \
   manageable(bool, PrintGC, false,                                          \
           "Print message at garbage collect")                               \
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -154,6 +154,8 @@
 } CommandLineFlagWithType;
 
 #define FLAG_IS_DEFAULT(name)         (CommandLineFlagsEx::is_default(FLAG_MEMBER(name)))
+#define FLAG_IS_ERGO(name)            (CommandLineFlagsEx::is_ergo(FLAG_MEMBER(name)))
+#define FLAG_IS_CMDLINE(name)         (CommandLineFlagsEx::is_cmdline(FLAG_MEMBER(name)))
 
 #define FLAG_SET_DEFAULT(name, value) ((name) = (value))
 
@@ -171,4 +173,6 @@
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
 
   static bool is_default(CommandLineFlag flag);
+  static bool is_ergo(CommandLineFlag flag);
+  static bool is_cmdline(CommandLineFlag flag);
 };
--- a/hotspot/src/share/vm/runtime/vm_version.cpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp	Wed Mar 05 17:37:04 2008 -0800
@@ -52,6 +52,8 @@
 int Abstract_VM_Version::_vm_minor_version = 0;
 int Abstract_VM_Version::_vm_build_number = 0;
 bool Abstract_VM_Version::_initialized = false;
+int Abstract_VM_Version::_parallel_worker_threads = 0;
+bool Abstract_VM_Version::_parallel_worker_threads_initialized = false;
 
 void Abstract_VM_Version::initialize() {
   if (_initialized) {
@@ -210,3 +212,43 @@
   }
 #endif
 }
+
+unsigned int Abstract_VM_Version::nof_parallel_worker_threads(
+                                                      unsigned int num,
+                                                      unsigned int den,
+                                                      unsigned int switch_pt) {
+  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+    assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0");
+    // For very large machines, there are diminishing returns
+    // for large numbers of worker threads.  Instead of
+    // hogging the whole system, use a fraction of the workers for every
+    // processor after the first 8.  For example, on a 72 cpu machine
+    // and a chosen fraction of 5/8
+    // use 8 + (72 - 8) * (5/8) == 48 worker threads.
+    unsigned int ncpus = (unsigned int) os::active_processor_count();
+    return (ncpus <= switch_pt) ?
+           ncpus :
+          (switch_pt + ((ncpus - switch_pt) * num) / den);
+  } else {
+    return ParallelGCThreads;
+  }
+}
+
+unsigned int Abstract_VM_Version::calc_parallel_worker_threads() {
+  return nof_parallel_worker_threads(5, 8, 8);
+}
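
The scaling rule in the comment above is easy to check standalone. A sketch of the same formula with hard-coded sample cpu counts (rather than querying the OS):

#include <stdio.h>

// Same formula as nof_parallel_worker_threads() in the default-flag case:
// all cpus up to switch_pt, then num/den of each additional cpu.
static unsigned int workers(unsigned int ncpus, unsigned int num,
                            unsigned int den, unsigned int switch_pt) {
  return (ncpus <= switch_pt) ? ncpus
                              : switch_pt + ((ncpus - switch_pt) * num) / den;
}

int main() {
  printf("%u\n", workers(8, 5, 8, 8));    // 8: at the switch point
  printf("%u\n", workers(72, 5, 8, 8));   // 48: 8 + (72 - 8) * 5/8
  printf("%u\n", workers(72, 5, 16, 8));  // 28: a steeper 5/16 scaling
  return 0;
}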
+
+
+// Does not set the _initialized flag since it is
+// a global flag.
+unsigned int Abstract_VM_Version::parallel_worker_threads() {
+  if (!_parallel_worker_threads_initialized) {
+    if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+      _parallel_worker_threads = VM_Version::calc_parallel_worker_threads();
+    } else {
+      _parallel_worker_threads = ParallelGCThreads;
+    }
+    _parallel_worker_threads_initialized = true;
+  }
+  return _parallel_worker_threads;
+}
--- a/hotspot/src/share/vm/runtime/vm_version.hpp	Wed Jul 05 16:34:33 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_version.hpp	Wed Mar 05 17:37:04 2008 -0800
@@ -36,6 +36,12 @@
   static int          _vm_minor_version;
   static int          _vm_build_number;
   static bool         _initialized;
+  static int          _parallel_worker_threads;
+  static bool         _parallel_worker_threads_initialized;
+
+  static unsigned int nof_parallel_worker_threads(unsigned int num,
+                                                  unsigned int den,
+                                                  unsigned int switch_pt);
  public:
   static void initialize();
 
@@ -69,4 +75,13 @@
   // subclasses should define new versions to hide this one as needed.  Note
   // that the O/S may support more sizes, but at most this many are used.
   static uint page_size_count() { return 2; }
+
+  // Returns the number of parallel threads to be used for VM
+  // work.  If that number has not been calculated, do so and
+  // save it.  Returns ParallelGCThreads if it is set on the
+  // command line.
+  static unsigned int parallel_worker_threads();
+  // Calculates and returns the number of parallel threads.  May
+  // be VM version specific.
+  static unsigned int calc_parallel_worker_threads();
 };