Merge
author aph
date Wed, 20 Apr 2016 11:11:56 +0000
changeset 38004 9a5433ffaa8a
parent 38003 f84c8ee82ac8 (current diff)
parent 38001 6a1924ec5269 (diff)
child 38005 5db1fc974323
Merge
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -43,7 +43,12 @@
 define_pd_global(intx,  OptoLoopAlignment,    16);
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  InlineSmallCode,      1000);
-define_pd_global(intx,  InitArrayShortSize,   -1); // not used
+
+// not used, but must satisfy the following constraints:
+// 1.) <VALUE> must be in the allowed range for intx *and*
+// 2.) <VALUE> % BytesPerLong == 0 so as to not
+//     violate the constraint verifier on JVM start-up.
+define_pd_global(intx,  InitArrayShortSize,   0);
 
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
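
The new comment above spells out why the Zero port's placeholder value had to change from -1 to 0: the flag is unused on Zero, but start-up verification still requires a value that is a valid intx and a multiple of BytesPerLong. The following standalone sketch illustrates that check; the function name is hypothetical, BytesPerLong is assumed to be 8 as on a 64-bit build, and the lower bound of 0 is an assumption here (the real check lives in HotSpot's flag constraint code).

// Illustrative sketch only -- not the HotSpot constraint verifier.
#include <cstdint>
#include <iostream>

typedef intptr_t intx;         // HotSpot's intx is a pointer-sized integer
const intx BytesPerLong = 8;   // assumed: 64-bit build

static bool init_array_short_size_ok(intx value) {
  // Assumed: the flag's allowed range starts at 0; the key point is that the
  // value must be a multiple of BytesPerLong or start-up verification fails.
  return value >= 0 && (value % BytesPerLong) == 0;
}

int main() {
  std::cout << init_array_short_size_ok(-1) << "\n"; // 0: the old placeholder fails
  std::cout << init_array_short_size_ok(0)  << "\n"; // 1: the new placeholder passes
  return 0;
}
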
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,6 +85,12 @@
       tables. */
   public Symbol probe(byte[] name) {
     long hashValue = hashSymbol(name);
+
+    Symbol s = sharedTable.probe(name, hashValue);
+    if (s != null) {
+      return s;
+    }
+
     for (HashtableEntry e = (HashtableEntry) bucket(hashToIndex(hashValue)); e != null; e = (HashtableEntry) e.next()) {
       if (e.hash() == hashValue) {
          Symbol sym = Symbol.create(e.literalValue());
@@ -94,7 +100,7 @@
       }
     }
 
-    return sharedTable.probe(name, hashValue);
+    return null;
   }
 
   public interface SymbolVisitor {
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/CompactHashTable.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/CompactHashTable.java	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,21 +44,23 @@
     Type type = db.lookupType("SymbolCompactHashTable");
     baseAddressField = type.getAddressField("_base_address");
     bucketCountField = type.getCIntegerField("_bucket_count");
-    tableEndOffsetField = type.getCIntegerField("_table_end_offset");
+    entryCountField = type.getCIntegerField("_entry_count");
     bucketsField = type.getAddressField("_buckets");
-    uintSize = db.lookupType("juint").getSize();
+    entriesField = type.getAddressField("_entries");
+    uintSize = db.lookupType("u4").getSize();
   }
 
   // Fields
   private static CIntegerField bucketCountField;
-  private static CIntegerField tableEndOffsetField;
+  private static CIntegerField entryCountField;
   private static AddressField  baseAddressField;
   private static AddressField  bucketsField;
+  private static AddressField  entriesField;
   private static long uintSize;
 
   private static int BUCKET_OFFSET_MASK = 0x3FFFFFFF;
   private static int BUCKET_TYPE_SHIFT = 30;
-  private static int COMPACT_BUCKET_TYPE = 1;
+  private static int VALUE_ONLY_BUCKET_TYPE = 1;
 
   public CompactHashTable(Address addr) {
     super(addr);
@@ -68,12 +70,8 @@
     return (int)bucketCountField.getValue(addr);
   }
 
-  private int tableEndOffset() {
-    return (int)tableEndOffsetField.getValue(addr);
-  }
-
-  private boolean isCompactBucket(int bucket_info) {
-    return (bucket_info >> BUCKET_TYPE_SHIFT) == COMPACT_BUCKET_TYPE;
+  private boolean isValueOnlyBucket(int bucket_info) {
+    return (bucket_info >> BUCKET_TYPE_SHIFT) == VALUE_ONLY_BUCKET_TYPE;
   }
 
   private int bucketOffset(int bucket_info) {
@@ -81,9 +79,8 @@
   }
 
   public Symbol probe(byte[] name, long hash) {
-
-    if (bucketCount() == 0) {
-      // The table is invalid, so don't try to lookup
+    if (bucketCount() <= 0) {
+      // This CompactHashTable is not in use
       return null;
     }
 
@@ -91,34 +88,33 @@
     Symbol  sym;
     Address baseAddress = baseAddressField.getValue(addr);
     Address bucket = bucketsField.getValue(addr);
-    Address bucketEnd = bucket;
     long index = hash % bucketCount();
     int bucketInfo = (int)bucket.getCIntegerAt(index * uintSize, uintSize, true);
     int bucketOffset = bucketOffset(bucketInfo);
     int nextBucketInfo = (int)bucket.getCIntegerAt((index+1) * uintSize, uintSize, true);
     int nextBucketOffset = bucketOffset(nextBucketInfo);
 
-    bucket = bucket.addOffsetTo(bucketOffset * uintSize);
+    Address entry = entriesField.getValue(addr).addOffsetTo(bucketOffset * uintSize);
 
-    if (isCompactBucket(bucketInfo)) {
-      symOffset = bucket.getCIntegerAt(0, uintSize, true);
+    if (isValueOnlyBucket(bucketInfo)) {
+      symOffset = entry.getCIntegerAt(0, uintSize, true);
       sym = Symbol.create(baseAddress.addOffsetTo(symOffset));
       if (sym.equals(name)) {
         return sym;
       }
     } else {
-      bucketEnd = bucket.addOffsetTo(nextBucketOffset * uintSize);
-      while (bucket.lessThan(bucketEnd)) {
-        long symHash = bucket.getCIntegerAt(0, uintSize, true);
+      Address entryMax = entriesField.getValue(addr).addOffsetTo(nextBucketOffset * uintSize);
+      while (entry.lessThan(entryMax)) {
+        long symHash = entry.getCIntegerAt(0, uintSize, true);
         if (symHash == hash) {
-          symOffset = bucket.getCIntegerAt(uintSize, uintSize, true);
+          symOffset = entry.getCIntegerAt(uintSize, uintSize, true);
           Address symAddr = baseAddress.addOffsetTo(symOffset);
           sym = Symbol.create(symAddr);
           if (sym.equals(name)) {
             return sym;
           }
         }
-        bucket = bucket.addOffsetTo(2 * uintSize);
+        entry = entry.addOffsetTo(2 * uintSize);
       }
     }
     return null;
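
In the serviceability-agent probe above, each 32-bit bucket_info word packs the bucket type into the top two bits and an offset into the entries array into the lower 30 bits; value-only buckets hold a single offset, regular buckets hold (hash, offset) pairs. A minimal standalone sketch of that decoding, reusing the constants from the diff (everything else here is simplified and illustrative):

// Illustrative decode of a bucket_info word; not the SA or HotSpot code itself.
#include <cstdint>
#include <cstdio>

const uint32_t BUCKET_OFFSET_MASK     = 0x3FFFFFFF; // low 30 bits: offset
const int      BUCKET_TYPE_SHIFT      = 30;         // top 2 bits: type
const uint32_t VALUE_ONLY_BUCKET_TYPE = 1;

static uint32_t bucket_offset(uint32_t info) { return info & BUCKET_OFFSET_MASK; }
static uint32_t bucket_type(uint32_t info)   { return info >> BUCKET_TYPE_SHIFT; }

int main() {
  // A value-only bucket whose single entry starts at slot 4 of entries[]:
  uint32_t info = (VALUE_ONLY_BUCKET_TYPE << BUCKET_TYPE_SHIFT) | 4u;
  printf("offset=%u, value_only=%d\n",
         bucket_offset(info), bucket_type(info) == VALUE_ONLY_BUCKET_TYPE);
  return 0;
}
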
--- a/hotspot/src/share/vm/classfile/compactHashtable.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "prims/jvm.h"
 #include "utilities/numberSeq.hpp"
@@ -34,270 +35,259 @@
 //
 // The compact hash table writer implementations
 //
-CompactHashtableWriter::CompactHashtableWriter(int table_type,
-                                               int num_entries,
+CompactHashtableWriter::CompactHashtableWriter(int num_buckets,
                                                CompactHashtableStats* stats) {
   assert(DumpSharedSpaces, "dump-time only");
-  _type = table_type;
-  _num_entries = num_entries;
-  _num_buckets = number_of_buckets(_num_entries);
-  _buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);
-  memset(_buckets, 0, sizeof(Entry*) * _num_buckets);
-
-  /* bucket sizes table */
-  _bucket_sizes = NEW_C_HEAP_ARRAY(juint, _num_buckets, mtSymbol);
-  memset(_bucket_sizes, 0, sizeof(juint) * _num_buckets);
+  _num_buckets = num_buckets;
+  _num_entries = 0;
+  _buckets = NEW_C_HEAP_ARRAY(GrowableArray<Entry>*, _num_buckets, mtSymbol);
+  for (int i=0; i<_num_buckets; i++) {
+    _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
+  }
 
-  stats->hashentry_count = _num_entries;
-  // Compact buckets' entries will have only the 4-byte offset, but
-  // we don't know how many there will be at this point. So use a
-  // conservative estimate here. The size is adjusted later when we
-  // write out the buckets.
-  stats->hashentry_bytes = _num_entries * 8;
-  stats->bucket_count    = _num_buckets;
-  stats->bucket_bytes    = (_num_buckets + 1) * (sizeof(juint));
+  stats->bucket_count = _num_buckets;
+  stats->bucket_bytes = (_num_buckets + 1) * (sizeof(u4));
   _stats = stats;
-
-  // See compactHashtable.hpp for table layout
-  _required_bytes = sizeof(juint) * 2; // _base_address, written as 2 juints
-  _required_bytes+= sizeof(juint) +    // num_entries
-                    sizeof(juint) +    // num_buckets
-                    stats->hashentry_bytes +
-                    stats->bucket_bytes;
+  _compact_buckets = NULL;
+  _compact_entries = NULL;
+  _num_empty_buckets = 0;
+  _num_value_only_buckets = 0;
+  _num_other_buckets = 0;
 }
 
 CompactHashtableWriter::~CompactHashtableWriter() {
   for (int index = 0; index < _num_buckets; index++) {
-    Entry* next = NULL;
-    for (Entry* tent = _buckets[index]; tent; tent = next) {
-      next = tent->next();
-      delete tent;
-    }
+    GrowableArray<Entry>* bucket = _buckets[index];
+    delete bucket;
   }
 
-  FREE_C_HEAP_ARRAY(juint, _bucket_sizes);
-  FREE_C_HEAP_ARRAY(Entry*, _buckets);
-}
-
-// Calculate the number of buckets in the temporary hash table
-int CompactHashtableWriter::number_of_buckets(int num_entries) {
-  const int buksize = (int)SharedSymbolTableBucketSize;
-  int num_buckets = (num_entries + buksize - 1) / buksize;
-  num_buckets = (num_buckets + 1) & (~0x01);
-
-  return num_buckets;
+  FREE_C_HEAP_ARRAY(GrowableArray<Entry>*, _buckets);
 }
 
 // Add a symbol entry to the temporary hash table
-void CompactHashtableWriter::add(unsigned int hash, Entry* entry) {
+void CompactHashtableWriter::add(unsigned int hash, u4 value) {
   int index = hash % _num_buckets;
-  entry->set_next(_buckets[index]);
-  _buckets[index] = entry;
-  _bucket_sizes[index] ++;
+  _buckets[index]->append_if_missing(Entry(hash, value));
+  _num_entries++;
 }
 
-// Write the compact table's bucket infos
-juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
-                                          NumberSeq* summary) {
-  int index;
-  juint* compact_table = p;
-  // Compute the start of the buckets, include the compact_bucket_infos table
-  // and the table end offset.
-  juint offset = _num_buckets + 1;
-  *first_bucket = compact_table + offset;
+void CompactHashtableWriter::allocate_table() {
+  int entries_space = 0;
+  for (int index = 0; index < _num_buckets; index++) {
+    GrowableArray<Entry>* bucket = _buckets[index];
+    int bucket_size = bucket->length();
+    if (bucket_size == 1) {
+      entries_space++;
+    } else {
+      entries_space += 2 * bucket_size;
+    }
+  }
 
-  for (index = 0; index < _num_buckets; index++) {
-    int bucket_size = _bucket_sizes[index];
+  if (entries_space & ~BUCKET_OFFSET_MASK) {
+    vm_exit_during_initialization("CompactHashtableWriter::allocate_table: Overflow! "
+                                  "Too many entries.");
+  }
+
+  Thread* THREAD = VMThread::vm_thread();
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  _compact_buckets = MetadataFactory::new_array<u4>(loader_data, _num_buckets + 1, THREAD);
+  _compact_entries = MetadataFactory::new_array<u4>(loader_data, entries_space, THREAD);
+
+  _stats->hashentry_count = _num_entries;
+  _stats->hashentry_bytes = entries_space * sizeof(u4);
+}
+
+// Write the compact table's buckets
+void CompactHashtableWriter::dump_table(NumberSeq* summary) {
+  u4 offset = 0;
+  for (int index = 0; index < _num_buckets; index++) {
+    GrowableArray<Entry>* bucket = _buckets[index];
+    int bucket_size = bucket->length();
     if (bucket_size == 1) {
       // bucket with one entry is compacted and only has the symbol offset
-      compact_table[index] = BUCKET_INFO(offset, COMPACT_BUCKET_TYPE);
-      offset += bucket_size; // each entry contains symbol offset only
+      _compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
+
+      Entry ent = bucket->at(0);
+      _compact_entries->at_put(offset++, ent.value());
+      _num_value_only_buckets++;
     } else {
       // regular bucket, each entry is a symbol (hash, offset) pair
-      compact_table[index] = BUCKET_INFO(offset, REGULAR_BUCKET_TYPE);
-      offset += bucket_size * 2; // each hash entry is 2 juints
-    }
-    if (offset & ~BUCKET_OFFSET_MASK) {
-      vm_exit_during_initialization("CompactHashtableWriter::dump_table: Overflow! "
-                                    "Too many symbols.");
+      _compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
+
+      for (int i=0; i<bucket_size; i++) {
+        Entry ent = bucket->at(i);
+        _compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
+        _compact_entries->at_put(offset++, ent.value());
+      }
+      if (bucket_size == 0) {
+        _num_empty_buckets++;
+      } else {
+        _num_other_buckets++;
+      }
     }
     summary->add(bucket_size);
   }
-  // Mark the end of the table
-  compact_table[_num_buckets] = BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE);
 
-  return compact_table;
+  // Mark the end of the buckets
+  _compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
+  assert(offset == (u4)_compact_entries->length(), "sanity");
 }
 
-// Write the compact table's entries
-juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
-                                            NumberSeq* summary) {
-  uintx base_address = 0;
-  uintx max_delta = 0;
-  int num_compact_buckets = 0;
-  if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
-    base_address = uintx(MetaspaceShared::shared_rs()->base());
-    max_delta    = uintx(MetaspaceShared::shared_rs()->size());
-    assert(max_delta <= MAX_SHARED_DELTA, "range check");
-  } else {
-    assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
-    assert(UseCompressedOops, "UseCompressedOops is required");
-  }
-
-  assert(p != NULL, "sanity");
-  for (int index = 0; index < _num_buckets; index++) {
-    juint count = 0;
-    int bucket_size = _bucket_sizes[index];
-    int bucket_type = BUCKET_TYPE(compact_table[index]);
-
-    if (bucket_size == 1) {
-      assert(bucket_type == COMPACT_BUCKET_TYPE, "Bad bucket type");
-      num_compact_buckets ++;
-    }
-    for (Entry* tent = _buckets[index]; tent;
-         tent = tent->next()) {
-      if (bucket_type == REGULAR_BUCKET_TYPE) {
-        *p++ = juint(tent->hash()); // write entry hash
-      }
-      if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
-        uintx deltax = uintx(tent->value()) - base_address;
-        assert(deltax < max_delta, "range check");
-        juint delta = juint(deltax);
-        *p++ = delta; // write entry offset
-      } else {
-        *p++ = oopDesc::encode_heap_oop(tent->string());
-      }
-      count ++;
-    }
-    assert(count == _bucket_sizes[index], "sanity");
-  }
-
-  // Adjust the hashentry_bytes in CompactHashtableStats. Each compact
-  // bucket saves 4-byte.
-  _stats->hashentry_bytes -= num_compact_buckets * 4;
-
-  return p;
-}
 
 // Write the compact table
-void CompactHashtableWriter::dump(char** top, char* end) {
+void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table_name) {
   NumberSeq summary;
-  char* old_top = *top;
-  juint* p = (juint*)(*top);
-
-  uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
+  allocate_table();
+  dump_table(&summary);
 
-  // Now write the following at the beginning of the table:
-  //      base_address (uintx)
-  //      num_entries  (juint)
-  //      num_buckets  (juint)
-  *p++ = high(base_address);
-  *p++ = low (base_address); // base address
-  *p++ = _num_entries;  // number of entries in the table
-  *p++ = _num_buckets;  // number of buckets in the table
-
-  juint* first_bucket = NULL;
-  juint* compact_table = dump_table(p, &first_bucket, &summary);
-  juint* bucket_end = dump_buckets(compact_table, first_bucket, &summary);
-
-  assert(bucket_end <= (juint*)end, "cannot write past end");
-  *top = (char*)bucket_end;
+  int table_bytes = _stats->bucket_bytes + _stats->hashentry_bytes;
+  address base_address = address(MetaspaceShared::shared_rs()->base());
+  cht->init(base_address,  _num_entries, _num_buckets,
+            _compact_buckets->data(), _compact_entries->data());
 
   if (PrintSharedSpaces) {
     double avg_cost = 0.0;
     if (_num_entries > 0) {
-      avg_cost = double(_required_bytes)/double(_num_entries);
+      avg_cost = double(table_bytes)/double(_num_entries);
     }
     tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
-                  table_name(), (intptr_t)base_address);
+                  table_name, (intptr_t)base_address);
     tty->print_cr("Number of entries       : %9d", _num_entries);
-    tty->print_cr("Total bytes used        : %9d", (int)((*top) - old_top));
+    tty->print_cr("Total bytes used        : %9d", table_bytes);
     tty->print_cr("Average bytes per entry : %9.3f", avg_cost);
     tty->print_cr("Average bucket size     : %9.3f", summary.avg());
     tty->print_cr("Variance of bucket size : %9.3f", summary.variance());
     tty->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
-    tty->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
+    tty->print_cr("Empty buckets           : %9d", _num_empty_buckets);
+    tty->print_cr("Value_Only buckets      : %9d", _num_value_only_buckets);
+    tty->print_cr("Other buckets           : %9d", _num_other_buckets);
   }
 }
 
-const char* CompactHashtableWriter::table_name() {
-  switch (_type) {
-  case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol";
-  case CompactHashtable<oop, char>::_string_table: return "string";
-  default:
-    ;
-  }
-  return "unknown";
+/////////////////////////////////////////////////////////////
+//
+// Customization for dumping Symbol and String tables
+
+void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
+  address base_address = address(MetaspaceShared::shared_rs()->base());
+  uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
+  assert(max_delta <= MAX_SHARED_DELTA, "range check");
+
+  uintx deltax = address(symbol) - base_address;
+  assert(deltax < max_delta, "range check");
+  u4 delta = u4(deltax);
+
+  CompactHashtableWriter::add(hash, delta);
+}
+
+void CompactStringTableWriter::add(unsigned int hash, oop string) {
+  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+}
+
+void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
+  CompactHashtableWriter::dump(cht, "symbol");
+}
+
+void CompactStringTableWriter::dump(CompactHashtable<oop, char> *cht) {
+  CompactHashtableWriter::dump(cht, "string");
 }
 
 /////////////////////////////////////////////////////////////
 //
 // The CompactHashtable implementation
 //
-template <class T, class N> const char* CompactHashtable<T, N>::init(
-                           CompactHashtableType type, const char* buffer) {
-  assert(!DumpSharedSpaces, "run-time only");
-  _type = type;
-  juint*p = (juint*)buffer;
-  juint upper = *p++;
-  juint lower = *p++;
-  _base_address = uintx(jlong_from(upper, lower));
-  _entry_count = *p++;
-  _bucket_count = *p++;
-  _buckets = p;
-  _table_end_offset = BUCKET_OFFSET(p[_bucket_count]); // located at the end of the bucket_info table
 
-  juint *end = _buckets + _table_end_offset;
-  return (const char*)end;
+void SimpleCompactHashtable::serialize(SerializeClosure* soc) {
+  soc->do_ptr((void**)&_base_address);
+  soc->do_u4(&_entry_count);
+  soc->do_u4(&_bucket_count);
+  soc->do_ptr((void**)&_buckets);
+  soc->do_ptr((void**)&_entries);
 }
 
-template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) {
+bool SimpleCompactHashtable::exists(u4 value) {
   assert(!DumpSharedSpaces, "run-time only");
-  for (juint i = 0; i < _bucket_count; i ++) {
-    juint bucket_info = _buckets[i];
-    juint bucket_offset = BUCKET_OFFSET(bucket_info);
-    int   bucket_type = BUCKET_TYPE(bucket_info);
-    juint* bucket = _buckets + bucket_offset;
-    juint* bucket_end = _buckets;
+
+  if (_entry_count == 0) {
+    return false;
+  }
+
+  unsigned int hash = (unsigned int)value;
+  int index = hash % _bucket_count;
+  u4 bucket_info = _buckets[index];
+  u4 bucket_offset = BUCKET_OFFSET(bucket_info);
+  int bucket_type = BUCKET_TYPE(bucket_info);
+  u4* entry = _entries + bucket_offset;
 
-    Symbol* sym;
-    if (bucket_type == COMPACT_BUCKET_TYPE) {
-      sym = (Symbol*)((void*)(_base_address + bucket[0]));
-      cl->do_symbol(&sym);
+  if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+    return (entry[0] == value);
+  } else {
+    u4* entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
+    while (entry < entry_max) {
+      if (entry[1] == value) {
+        return true;
+      }
+      entry += 2;
+    }
+    return false;
+  }
+}
+
+template <class I>
+inline void SimpleCompactHashtable::iterate(const I& iterator) {
+  assert(!DumpSharedSpaces, "run-time only");
+  for (u4 i = 0; i < _bucket_count; i++) {
+    u4 bucket_info = _buckets[i];
+    u4 bucket_offset = BUCKET_OFFSET(bucket_info);
+    int bucket_type = BUCKET_TYPE(bucket_info);
+    u4* entry = _entries + bucket_offset;
+
+    if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+      iterator.do_value(_base_address, entry[0]);
     } else {
-      bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
-      while (bucket < bucket_end) {
-        sym = (Symbol*)((void*)(_base_address + bucket[1]));
-        cl->do_symbol(&sym);
-        bucket += 2;
+      u4* entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]);
+      while (entry < entry_max) {
+        iterator.do_value(_base_address, entry[0]);
+        entry += 2;
       }
     }
   }
 }
 
-template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) {
-  assert(!DumpSharedSpaces, "run-time only");
-  assert(_type == _string_table || _bucket_count == 0, "sanity");
-  for (juint i = 0; i < _bucket_count; i ++) {
-    juint bucket_info = _buckets[i];
-    juint bucket_offset = BUCKET_OFFSET(bucket_info);
-    int   bucket_type = BUCKET_TYPE(bucket_info);
-    juint* bucket = _buckets + bucket_offset;
-    juint* bucket_end = _buckets;
+template <class T, class N> void CompactHashtable<T, N>::serialize(SerializeClosure* soc) {
+  SimpleCompactHashtable::serialize(soc);
+  soc->do_u4(&_type);
+}
+
+class CompactHashtable_SymbolIterator {
+  SymbolClosure* const _closure;
+public:
+  CompactHashtable_SymbolIterator(SymbolClosure *cl) : _closure(cl) {}
+  inline void do_value(address base_address, u4 offset) const {
+    Symbol* sym = (Symbol*)((void*)(base_address + offset));
+    _closure->do_symbol(&sym);
+  }
+};
 
-    narrowOop o;
-    if (bucket_type == COMPACT_BUCKET_TYPE) {
-      o = (narrowOop)bucket[0];
-      f->do_oop(&o);
-    } else {
-      bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
-      while (bucket < bucket_end) {
-        o = (narrowOop)bucket[1];
-        f->do_oop(&o);
-        bucket += 2;
-      }
-    }
+template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) {
+  CompactHashtable_SymbolIterator iterator(cl);
+  iterate(iterator);
+}
+
+class CompactHashtable_OopIterator {
+  OopClosure* const _closure;
+public:
+  CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {}
+  inline void do_value(address base_address, u4 offset) const {
+    narrowOop o = (narrowOop)offset;
+    _closure->do_oop(&o);
   }
+};
+
+template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* cl) {
+  assert(_type == _string_table || _bucket_count == 0, "sanity");
+  CompactHashtable_OopIterator iterator(cl);
+  iterate(iterator);
 }
 
 // Explicitly instantiate these types
@@ -360,7 +350,7 @@
   } else {
     corrupted(_p, "Unexpected character");
   }
-  _line_no ++;
+  _line_no++;
   return true;
 }
 
@@ -390,7 +380,7 @@
 }
 
 void HashtableTextDump::scan_prefix_type() {
-  _p ++;
+  _p++;
   if (strncmp(_p, "SECTION: String", 15) == 0) {
     _p += 15;
     _prefix_type = StringPrefix;
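
The rewritten CompactHashtableWriter above builds the table in two passes: add() collects entries into per-bucket GrowableArrays, then allocate_table() sizes the shared entries[] array (one u4 for a single-entry "value only" bucket, two u4s per entry otherwise) before dump_table() lays the data out. A minimal sketch of that sizing rule, using std::vector in place of GrowableArray; it is an illustration, not the HotSpot implementation:

// Sketch of the entries[] sizing rule used by allocate_table() above.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry { uint32_t hash; uint32_t value; };

static size_t entries_space(const std::vector<std::vector<Entry>>& buckets) {
  size_t space = 0;
  for (const std::vector<Entry>& bucket : buckets) {
    size_t n = bucket.size();
    space += (n == 1) ? 1       // value-only bucket: the value word only
                      : 2 * n;  // regular bucket: (hash, value) per entry
  }
  return space;
}

int main() {
  std::vector<std::vector<Entry>> buckets(3);
  buckets[0].push_back({0x1234u, 40u});                      // 1 entry  -> 1 word
  buckets[1].push_back({0x5678u, 80u});
  buckets[1].push_back({0x9abcu, 120u});                     // 2 entries -> 4 words
  printf("u4 words needed: %zu\n", entries_space(buckets));  // prints 5 (bucket 2 is empty)
  return 0;
}
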
--- a/hotspot/src/share/vm/classfile/compactHashtable.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/compactHashtable.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,10 @@
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.hpp"
 
+template <class T, class N> class CompactHashtable;
 class NumberSeq;
+class SimpleCompactHashtable;
+class SerializeClosure;
 
 // Stats for symbol tables in the CDS archive
 class CompactHashtableStats VALUE_OBJ_CLASS_SPEC {
@@ -70,66 +73,74 @@
 //
 class CompactHashtableWriter: public StackObj {
 public:
-  class Entry: public CHeapObj<mtSymbol> {
-    Entry* _next;
+  class Entry VALUE_OBJ_CLASS_SPEC {
     unsigned int _hash;
-    void* _literal;
+    u4 _value;
 
   public:
-    Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {}
-    Entry(unsigned int hash, oop string)     : _next(NULL), _hash(hash), _literal(string) {}
+    Entry() {}
+    Entry(unsigned int hash, u4 val) : _hash(hash), _value(val) {}
 
-    void *value() {
-      return _literal;
-    }
-    Symbol *symbol() {
-      return (Symbol*)_literal;
-    }
-    oop string() {
-      return (oop)_literal;
+    u4 value() {
+      return _value;
     }
     unsigned int hash() {
       return _hash;
     }
-    Entry *next()           {return _next;}
-    void set_next(Entry *p) {_next = p;}
+
+    bool operator==(const CompactHashtableWriter::Entry& other) {
+      return (_value == other._value && _hash == other._hash);
+    }
   }; // class CompactHashtableWriter::Entry
 
 private:
-  static int number_of_buckets(int num_entries);
-
-  int _type;
   int _num_entries;
   int _num_buckets;
-  juint* _bucket_sizes;
-  Entry** _buckets;
-  int _required_bytes;
+  int _num_empty_buckets;
+  int _num_value_only_buckets;
+  int _num_other_buckets;
+  GrowableArray<Entry>** _buckets;
   CompactHashtableStats* _stats;
+  Array<u4>* _compact_buckets;
+  Array<u4>* _compact_entries;
 
 public:
   // This is called at dump-time only
-  CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats);
+  CompactHashtableWriter(int num_buckets, CompactHashtableStats* stats);
   ~CompactHashtableWriter();
 
-  int get_required_bytes() {
-    return _required_bytes;
+  void add(unsigned int hash, u4 value);
+  void add(u4 value) {
+    add((unsigned int)value, value);
   }
 
-  inline void add(unsigned int hash, Symbol* symbol);
-  inline void add(unsigned int hash, oop string);
-
 private:
-  void add(unsigned int hash, Entry* entry);
-  juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary);
-  juint* dump_buckets(juint* table, juint* p, NumberSeq* summary);
+  void allocate_table();
+  void dump_table(NumberSeq* summary);
 
 public:
-  void dump(char** top, char* end);
+  void dump(SimpleCompactHashtable *cht, const char* table_name);
   const char* table_name();
 };
 
+class CompactSymbolTableWriter: public CompactHashtableWriter {
+public:
+  CompactSymbolTableWriter(int num_buckets, CompactHashtableStats* stats) :
+    CompactHashtableWriter(num_buckets, stats) {}
+  void add(unsigned int hash, Symbol *symbol);
+  void dump(CompactHashtable<Symbol*, char> *cht);
+};
+
+class CompactStringTableWriter: public CompactHashtableWriter {
+public:
+  CompactStringTableWriter(int num_entries, CompactHashtableStats* stats) :
+    CompactHashtableWriter(num_entries, stats) {}
+  void add(unsigned int hash, oop string);
+  void dump(CompactHashtable<oop, char> *cht);
+};
+
 #define REGULAR_BUCKET_TYPE       0
-#define COMPACT_BUCKET_TYPE       1
+#define VALUE_ONLY_BUCKET_TYPE    1
 #define TABLEEND_BUCKET_TYPE      3
 #define BUCKET_OFFSET_MASK        0x3FFFFFFF
 #define BUCKET_OFFSET(info)       ((info) & BUCKET_OFFSET_MASK)
@@ -146,90 +157,106 @@
 // and tend to have large number of entries, we try to minimize the footprint
 // cost per entry.
 //
-// Layout of compact table in the shared archive:
+// The CompactHashtable is split into two arrays
 //
-//   uintx base_address;
-//   juint num_entries;
-//   juint num_buckets;
-//   juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
-//   juint table[]
+//   u4 buckets[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
+//   u4 entries[<variable size>]
 //
-// -----------------------------------
-// | base_address  | num_entries     |
-// |---------------------------------|
-// | num_buckets   | bucket_info0    |
-// |---------------------------------|
-// | bucket_info1  | bucket_info2    |
-// | bucket_info3    ...             |
-// | ....          | table_end_info  |
-// |---------------------------------|
-// | entry0                          |
-// | entry1                          |
-// | entry2                          |
-// |                                 |
-// | ...                             |
-// -----------------------------------
+// The size of buckets[] is 'num_buckets + 1'. Each entry of
+// buckets[] is a 32-bit encoding of the bucket type and bucket offset,
+// with the type in the left-most 2-bit and offset in the remaining 30-bit.
+// The last entry is a special type. It contains the end of the last
+// bucket.
 //
-// The size of the bucket_info table is 'num_buckets + 1'. Each entry of the
-// bucket_info table is a 32-bit encoding of the bucket type and bucket offset,
-// with the type in the left-most 2-bit and offset in the remaining 30-bit.
-// The last entry is a special type. It contains the offset of the last
-// bucket end. We use that information when traversing the compact table.
-//
-// There are two types of buckets, regular buckets and compact buckets. The
-// compact buckets have '01' in their highest 2-bit, and regular buckets have
+// There are two types of buckets, regular buckets and value_only buckets. The
+// value_only buckets have '01' in their highest 2-bit, and regular buckets have
 // '00' in their highest 2-bit.
 //
-// For normal buckets, each entry is 8 bytes in the table[]:
-//   juint hash;    /* symbol/string hash */
+// For normal buckets, each entry is 8 bytes in the entries[]:
+//   u4 hash;    /* symbol/string hash */
 //   union {
-//     juint offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
+//     u4 offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
 //     narrowOop str; /* String narrowOop encoding */
 //   }
 //
 //
-// For compact buckets, each entry has only the 4-byte 'offset' in the table[].
+// For value_only buckets, each entry has only the 4-byte 'offset' in the entries[].
+//
+// Example -- note that the second bucket is a VALUE_ONLY_BUCKET_TYPE so the hash code
+//            is skipped.
+// buckets[0, 4, 5, ....]
+//         |  |  |
+//         |  |  +---+
+//         |  |      |
+//         |  +----+ |
+//         v       v v
+// entries[H,O,H,O,O,H,O,H,O.....]
 //
 // See CompactHashtable::lookup() for how the table is searched at runtime.
 // See CompactHashtableWriter::dump() for how the table is written at CDS
 // dump time.
 //
-template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC {
+class SimpleCompactHashtable VALUE_OBJ_CLASS_SPEC {
+protected:
+  address  _base_address;
+  u4  _bucket_count;
+  u4  _entry_count;
+  u4* _buckets;
+  u4* _entries;
+
+public:
+  SimpleCompactHashtable() {
+    _entry_count = 0;
+    _bucket_count = 0;
+    _buckets = 0;
+    _entries = 0;
+  }
+
+  void reset() {
+    _bucket_count = 0;
+    _entry_count = 0;
+    _buckets = 0;
+    _entries = 0;
+  }
+
+  void init(address base_address, u4 entry_count, u4 bucket_count, u4* buckets, u4* entries) {
+    _base_address = base_address;
+    _bucket_count = bucket_count;
+    _entry_count = entry_count;
+    _buckets = buckets;
+    _entries = entries;
+  }
+
+  template <class I> inline void iterate(const I& iterator);
+
+  bool exists(u4 value);
+
+  // For reading from/writing to the CDS archive
+  void serialize(SerializeClosure* soc);
+};
+
+template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {
   friend class VMStructs;
 
- public:
+public:
   enum CompactHashtableType {
     _symbol_table = 0,
     _string_table = 1
   };
 
 private:
-  CompactHashtableType _type;
-  uintx  _base_address;
-  juint  _entry_count;
-  juint  _bucket_count;
-  juint  _table_end_offset;
-  juint* _buckets;
+  u4 _type;
 
-  inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
-                              juint* addr, const char* name, int len);
+  inline Symbol* decode_entry(CompactHashtable<Symbol*, char>* const t,
+                              u4 offset, const char* name, int len);
 
-  inline oop lookup_entry(CompactHashtable<oop, char>* const t,
-                          juint* addr, const char* name, int len);
+  inline oop decode_entry(CompactHashtable<oop, char>* const t,
+                          u4 offset, const char* name, int len);
 public:
-  CompactHashtable() {
-    _entry_count = 0;
-    _bucket_count = 0;
-    _table_end_offset = 0;
-    _buckets = 0;
-  }
-  const char* init(CompactHashtableType type, const char *buffer);
+  CompactHashtable() : SimpleCompactHashtable() {}
 
-  void reset() {
-    _entry_count = 0;
-    _bucket_count = 0;
-    _table_end_offset = 0;
-    _buckets = 0;
+  void set_type(CompactHashtableType type) {
+    _type = (u4)type;
   }
 
   // Lookup an entry from the compact table
@@ -240,6 +267,9 @@
 
   // iterate over strings
   void oops_do(OopClosure* f);
+
+  // For reading from/writing to the CDS archive
+  void serialize(SerializeClosure* soc);
 };
 
 ////////////////////////////////////////////////////////////////////////
@@ -293,7 +323,7 @@
     u8 n = 0;
 
     while (p < end) {
-      char c = *p ++;
+      char c = *p++;
       if ('0' <= c && c <= '9') {
         n = n * 10 + (c - '0');
         if (n > (u8)INT_MAX) {
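
The header comment above describes the new split layout: buckets[] holds num_buckets + 1 info words (type in the top 2 bits, offset into entries[] in the low 30 bits, with a final end marker), and entries[] holds either a lone value for a value-only bucket or (hash, value) pairs for a regular one. The following standalone sketch walks that layout for a lookup; types and the match test are simplified (the real code decodes the value into a Symbol* or oop and compares it against the key), so treat it as an illustration under those assumptions:

// Illustrative lookup over a buckets[]/entries[] pair laid out as described above.
#include <cstdint>
#include <cstdio>

const uint32_t OFFSET_MASK = 0x3FFFFFFF;
const int      TYPE_SHIFT  = 30;
const uint32_t VALUE_ONLY  = 1;
const uint32_t TABLEEND    = 3;

static bool lookup(const uint32_t* buckets, uint32_t bucket_count,
                   const uint32_t* entries, uint32_t hash, uint32_t* value_out) {
  uint32_t index = hash % bucket_count;
  uint32_t info  = buckets[index];
  const uint32_t* entry     = entries + (info & OFFSET_MASK);
  const uint32_t* entry_max = entries + (buckets[index + 1] & OFFSET_MASK);

  if ((info >> TYPE_SHIFT) == VALUE_ONLY) {
    *value_out = entry[0];        // hash word omitted; real code still compares the key
    return true;
  }
  for (; entry < entry_max; entry += 2) {   // (hash, value) pairs up to the next bucket
    if (entry[0] == hash) { *value_out = entry[1]; return true; }
  }
  return false;
}

int main() {
  // Bucket 0 is regular with two entries; bucket 1 is value-only; last word marks the end.
  uint32_t buckets[3] = { 0,
                          (VALUE_ONLY << TYPE_SHIFT) | 4u,
                          (TABLEEND   << TYPE_SHIFT) | 5u };
  uint32_t entries[5] = { 2, 100,  4, 200,  300 };
  uint32_t v = 0;
  if (lookup(buckets, 2, entries, 4, &v)) printf("hash 4 -> %u\n", v); // 200
  if (lookup(buckets, 2, entries, 3, &v)) printf("hash 3 -> %u\n", v); // 300 (value-only)
  return 0;
}
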
--- a/hotspot/src/share/vm/classfile/compactHashtable.inline.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/compactHashtable.inline.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,9 @@
 #include "oops/oop.inline.hpp"
 
 template <class T, class N>
-inline Symbol* CompactHashtable<T, N>::lookup_entry(CompactHashtable<Symbol*, char>* const t,
-                                             juint* addr, const char* name, int len) {
-  Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
+inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
+                                                    u4 offset, const char* name, int len) {
+  Symbol* sym = (Symbol*)(_base_address + offset);
   if (sym->equals(name, len)) {
     assert(sym->refcount() == -1, "must be shared");
     return sym;
@@ -42,9 +42,9 @@
 }
 
 template <class T, class N>
-inline oop CompactHashtable<T, N>::lookup_entry(CompactHashtable<oop, char>* const t,
-                                                juint* addr, const char* name, int len) {
-  narrowOop obj = (narrowOop)(*addr);
+inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
+                                                u4 offset, const char* name, int len) {
+  narrowOop obj = (narrowOop)offset;
   oop string = oopDesc::decode_heap_oop(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
@@ -56,17 +56,14 @@
 template <class T, class N>
 inline T CompactHashtable<T,N>::lookup(const N* name, unsigned int hash, int len) {
   if (_entry_count > 0) {
-    assert(!DumpSharedSpaces, "run-time only");
     int index = hash % _bucket_count;
-    juint bucket_info = _buckets[index];
-    juint bucket_offset = BUCKET_OFFSET(bucket_info);
-    int   bucket_type = BUCKET_TYPE(bucket_info);
-    juint* bucket = _buckets + bucket_offset;
-    juint* bucket_end = _buckets;
+    u4 bucket_info = _buckets[index];
+    u4 bucket_offset = BUCKET_OFFSET(bucket_info);
+    int bucket_type = BUCKET_TYPE(bucket_info);
+    u4* entry = _entries + bucket_offset;
 
-    if (bucket_type == COMPACT_BUCKET_TYPE) {
-      // the compact bucket has one entry with entry offset only
-      T res = lookup_entry(this, &bucket[0], name, len);
+    if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+      T res = decode_entry(this, entry[0], name, len);
       if (res != NULL) {
         return res;
       }
@@ -74,29 +71,20 @@
       // This is a regular bucket, which has more than one
       // entries. Each entry is a pair of entry (hash, offset).
       // Seek until the end of the bucket.
-      bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
-      while (bucket < bucket_end) {
-        unsigned int h = (unsigned int)(bucket[0]);
+      u4* entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
+      while (entry < entry_max) {
+        unsigned int h = (unsigned int)(entry[0]);
         if (h == hash) {
-          T res = lookup_entry(this, &bucket[1], name, len);
+          T res = decode_entry(this, entry[1], name, len);
           if (res != NULL) {
             return res;
           }
         }
-        bucket += 2;
+        entry += 2;
       }
     }
   }
   return NULL;
 }
 
-inline void CompactHashtableWriter::add(unsigned int hash, Symbol* symbol) {
-  add(hash, new Entry(hash, symbol));
-}
-
-inline void CompactHashtableWriter::add(unsigned int hash, oop string) {
-  add(hash, new Entry(hash, string));
-}
-
-
 #endif // SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
--- a/hotspot/src/share/vm/classfile/stringTable.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -662,7 +662,7 @@
 
 // Sharing
 bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
-                                     CompactHashtableWriter* ch_table) {
+                                     CompactStringTableWriter* writer) {
 #if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
   assert(UseG1GC, "Only support G1 GC");
   assert(UseCompressedOops && UseCompressedClassPointers,
@@ -713,7 +713,7 @@
       }
 
       // add to the compact table
-      ch_table->add(hash, new_s);
+      writer->add(hash, new_s);
     }
   }
 
@@ -723,40 +723,41 @@
   return true;
 }
 
-bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space,
-                                     size_t* space_size) {
+void StringTable::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
+                            size_t* space_size) {
 #if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
-  if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
-    if (PrintSharedSpaces) {
-      tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
-                    "UseCompressedOops and UseCompressedClassPointers are required.");
+  _shared_table.reset();
+  if (soc->writing()) {
+    if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
+      if (PrintSharedSpaces) {
+        tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
+                      "UseCompressedOops and UseCompressedClassPointers are required.");
+      }
+    } else {
+      int num_buckets = the_table()->number_of_entries() /
+                             SharedSymbolTableBucketSize;
+      CompactStringTableWriter writer(num_buckets,
+                                      &MetaspaceShared::stats()->string);
+
+      // Copy the interned strings into the "string space" within the java heap
+      if (copy_shared_string(string_space, &writer)) {
+        for (int i = 0; i < string_space->length(); i++) {
+          *space_size += string_space->at(i).byte_size();
+        }
+        writer.dump(&_shared_table);
+      }
     }
-    return true;
   }
 
-  CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table,
-                                  the_table()->number_of_entries(),
-                                  &MetaspaceShared::stats()->string);
+  _shared_table.set_type(CompactHashtable<oop, char>::_string_table);
+  _shared_table.serialize(soc);
 
-  // Copy the interned strings into the "string space" within the java heap
-  if (!copy_shared_string(string_space, &ch_table)) {
-    return false;
-  }
-
-  for (int i = 0; i < string_space->length(); i++) {
-    *space_size += string_space->at(i).byte_size();
+  if (soc->writing()) {
+    _shared_table.reset(); // Sanity. Make sure we don't use the shared table at dump time
+  } else if (_ignore_shared_strings) {
+    _shared_table.reset();
   }
-
-  // Now dump the compact table
-  if (*top + ch_table.get_required_bytes() > end) {
-    // not enough space left
-    return false;
-  }
-  ch_table.dump(top, end);
-  *top = (char*)align_ptr_up(*top, sizeof(void*));
-
 #endif
-  return true;
 }
 
 void StringTable::shared_oops_do(OopClosure* f) {
@@ -765,25 +766,3 @@
 #endif
 }
 
-const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
-#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
-  if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
-    // no shared string data
-    return buffer;
-  }
-
-  // initialize the shared table
-  juint *p = (juint*)buffer;
-  const char* end = _shared_table.init(
-          CompactHashtable<oop, char>::_string_table, (char*)p);
-  const char* aligned_end = (const char*)align_ptr_up(end, sizeof(void*));
-
-  if (_ignore_shared_strings) {
-    _shared_table.reset();
-  }
-
-  return aligned_end;
-#endif
-
-  return buffer;
-}
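
The StringTable change above replaces the copy_compact_table / init_shared_table pair with a single serialize() method driven by a SerializeClosure, so the same field-by-field code runs at dump time (soc->writing() is true) and at restore time. A toy sketch of that symmetric pattern follows; the closure below is a stand-in, and only the writing()/do_u4() calls already visible in this diff are assumed about the real interface:

// Toy "serialize once, use for both directions" pattern; not HotSpot's SerializeClosure.
#include <cstdint>
#include <cstdio>
#include <vector>

class ToySerializeClosure {
  bool _writing;
  std::vector<uint32_t> _stream;
  size_t _pos;
public:
  explicit ToySerializeClosure(bool writing) : _writing(writing), _pos(0) {}
  bool writing() const { return _writing; }
  void do_u4(uint32_t* p) {
    if (_writing) _stream.push_back(*p);   // dump: copy field into the archive stream
    else          *p = _stream[_pos++];    // restore: copy stream back into the field
  }
  const std::vector<uint32_t>& stream() const { return _stream; }
  void set_stream(const std::vector<uint32_t>& s) { _stream = s; _pos = 0; }
};

struct ToyTable {
  uint32_t entry_count;
  uint32_t bucket_count;
  void serialize(ToySerializeClosure* soc) { // one method for both dump and restore
    soc->do_u4(&entry_count);
    soc->do_u4(&bucket_count);
  }
};

int main() {
  ToyTable dumped = { 17, 4 };
  ToySerializeClosure out(true);
  dumped.serialize(&out);                   // writing() == true at dump time

  ToyTable restored = { 0, 0 };
  ToySerializeClosure in(false);
  in.set_stream(out.stream());
  restored.serialize(&in);                  // same call now reads the archived values
  printf("restored: %u entries, %u buckets\n", restored.entry_count, restored.bucket_count);
  return 0;
}
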
--- a/hotspot/src/share/vm/classfile/stringTable.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/stringTable.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -29,8 +29,9 @@
 #include "utilities/hashtable.hpp"
 
 template <class T, class N> class CompactHashtable;
-class CompactHashtableWriter;
+class CompactStringTableWriter;
 class FileMapInfo;
+class SerializeClosure;
 
 class StringTable : public RehashableHashtable<oop, mtSymbol> {
   friend class VMStructs;
@@ -155,10 +156,9 @@
   static bool shared_string_ignored()       { return _ignore_shared_strings; }
   static void shared_oops_do(OopClosure* f);
   static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
-                                 CompactHashtableWriter* ch_table);
-  static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space,
-                                 size_t* space_size);
-  static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
+                                 CompactStringTableWriter* ch_table);
+  static void serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
+                        size_t* space_size);
   static void reverse() {
     the_table()->Hashtable<oop, mtSymbol>::reverse();
   }
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -537,37 +537,42 @@
   }
 }
 
-bool SymbolTable::copy_compact_table(char** top, char*end) {
+void SymbolTable::serialize(SerializeClosure* soc) {
 #if INCLUDE_CDS
-  CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table,
-                                  the_table()->number_of_entries(),
-                                  &MetaspaceShared::stats()->symbol);
-  if (*top + ch_table.get_required_bytes() > end) {
-    // not enough space left
-    return false;
+  _shared_table.reset();
+  if (soc->writing()) {
+    int num_buckets = the_table()->number_of_entries() /
+                            SharedSymbolTableBucketSize;
+    CompactSymbolTableWriter writer(num_buckets,
+                                    &MetaspaceShared::stats()->symbol);
+    for (int i = 0; i < the_table()->table_size(); ++i) {
+      HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
+      for ( ; p != NULL; p = p->next()) {
+        Symbol* s = (Symbol*)(p->literal());
+        unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length());
+        assert(fixed_hash == p->hash(), "must not rehash during dumping");
+        writer.add(fixed_hash, s);
+      }
+    }
+
+    writer.dump(&_shared_table);
   }
 
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      Symbol* s = (Symbol*)(p->literal());
-      unsigned int fixed_hash =  hash_shared_symbol((char*)s->bytes(), s->utf8_length());
-      assert(fixed_hash == p->hash(), "must not rehash during dumping");
-      ch_table.add(fixed_hash, s);
-    }
-  }
+  _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
+  _shared_table.serialize(soc);
 
-  ch_table.dump(top, end);
+  if (soc->writing()) {
+    // Verify table is correct
+    Symbol* sym = vmSymbols::java_lang_Object();
+    const char* name = (const char*)sym->bytes();
+    int len = sym->utf8_length();
+    unsigned int hash = hash_symbol(name, len);
+    assert(sym == _shared_table.lookup(name, hash, len), "sanity");
 
-  *top = (char*)align_ptr_up(*top, sizeof(void*));
+    // Sanity. Make sure we don't use the shared table at dump time
+    _shared_table.reset();
+  }
 #endif
-  return true;
-}
-
-const char* SymbolTable::init_shared_table(const char* buffer) {
-  const char* end = _shared_table.init(
-          CompactHashtable<Symbol*, char>::_symbol_table, buffer);
-  return (const char*)align_ptr_up(end, sizeof(void*));
 }
 
 //---------------------------------------------------------------------------
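
With the new writer, the caller picks the bucket count up front as number_of_entries / SharedSymbolTableBucketSize instead of the writer deriving it internally. A tiny arithmetic sketch follows; the bucket-size value of 4 is the flag's usual default and is only assumed here:

// Sketch of the bucket-count choice made in SymbolTable::serialize above.
#include <cstdio>

int main() {
  int number_of_entries         = 40000; // example: symbols being dumped
  int shared_symbol_bucket_size = 4;     // assumed default of SharedSymbolTableBucketSize
  int num_buckets = number_of_entries / shared_symbol_bucket_size;
  printf("num_buckets = %d (target average bucket size %d)\n",
         num_buckets, shared_symbol_bucket_size);
  return 0;
}
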
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -41,6 +41,7 @@
 
 class BoolObjectClosure;
 class outputStream;
+class SerializeClosure;
 
 // TempNewSymbol acts as a handle class in a handle/body idiom and is
 // responsible for proper resource management of the body (which is a Symbol*).
@@ -251,8 +252,7 @@
   static void read(const char* filename, TRAPS);
 
   // Sharing
-  static bool copy_compact_table(char** top, char* end);
-  static const char* init_shared_table(const char* buffer);
+  static void serialize(SerializeClosure* soc);
 
   // Rehash the symbol table if it gets out of balance
   static void rehash_table();
--- a/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -29,6 +29,7 @@
 #include "classfile/dictionary.hpp"
 
 class ClassFileStream;
+class SerializeClosure;
 
 class SystemDictionaryShared: public SystemDictionary {
 public:
@@ -77,6 +78,7 @@
                                            TRAPS) {
     return NULL;
   }
+  static void serialize(SerializeClosure* soc) {}
 };
 
 #endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -2769,10 +2769,10 @@
       _collector(collector),
       _n_workers(n_workers) {}
   // Work method in support of parallel rescan ... of young gen spaces
-  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
+  void do_young_space_rescan(OopsInGenClosure* cl,
                              ContiguousSpace* space,
                              HeapWord** chunk_array, size_t chunk_top);
-  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
+  void work_on_young_gen_roots(OopsInGenClosure* cl);
 };
 
 // Parallel initial mark task
@@ -4255,7 +4255,7 @@
 
   // ---------- young gen roots --------------
   {
-    work_on_young_gen_roots(worker_id, &par_mri_cl);
+    work_on_young_gen_roots(&par_mri_cl);
     _timer.stop();
     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
@@ -4346,7 +4346,7 @@
   }
 };
 
-void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
+void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
   ParNewGeneration* young_gen = _collector->_young_gen;
   ContiguousSpace* eden_space = young_gen->eden();
   ContiguousSpace* from_space = young_gen->from();
@@ -4360,9 +4360,9 @@
   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
 
-  do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
-  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
-  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
+  do_young_space_rescan(cl, to_space, NULL, 0);
+  do_young_space_rescan(cl, from_space, sca, sct);
+  do_young_space_rescan(cl, eden_space, eca, ect);
 }
 
 // work_queue(i) is passed to the closure
@@ -4389,7 +4389,7 @@
   // work first.
   // ---------- young gen roots --------------
   {
-    work_on_young_gen_roots(worker_id, &par_mrias_cl);
+    work_on_young_gen_roots(&par_mrias_cl);
     _timer.stop();
     log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
@@ -4471,9 +4471,8 @@
   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 }
 
-// Note that parameter "i" is not used.
 void
-CMSParMarkTask::do_young_space_rescan(uint worker_id,
+CMSParMarkTask::do_young_space_rescan(
   OopsInGenClosure* cl, ContiguousSpace* space,
   HeapWord** chunk_array, size_t chunk_top) {
   // Until all tasks completed:
--- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 
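
The concurrentG1Refine.cpp changes below replace the predictor-based setup with explicit green/yellow/red zones and give each refinement worker its own activation and deactivation thresholds: with step = (yellow_zone - green_zone) / thread_num, worker i activates at green_zone + ceil(step * (i + 1)) and deactivates at green_zone + floor(step * i), while worker 0 may use a smaller step so available buffers stay near the green zone. A standalone sketch of that calculation with made-up zone values and worker count:

// Illustrative per-worker thresholds mirroring calc_thresholds in the diff below.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>

struct Thresholds { size_t activate; size_t deactivate; };

static Thresholds calc_thresholds(size_t green_zone, size_t yellow_zone,
                                  unsigned worker_i, unsigned n_workers,
                                  double parallel_gc_threads) {
  double step = double(yellow_zone - green_zone) / n_workers;
  if (worker_i == 0) {
    // Activate worker 0 more aggressively so available buffers stay near green_zone.
    step = std::min(step, parallel_gc_threads / 2.0);
  }
  Thresholds t;
  t.activate   = green_zone + static_cast<size_t>(ceil(step * (worker_i + 1)));
  t.deactivate = green_zone + static_cast<size_t>(floor(step * worker_i));
  return t;
}

int main() {
  const size_t green = 8, yellow = 24;  // example zone values, not G1 defaults
  const unsigned n_workers = 4;
  for (unsigned i = 0; i < n_workers; i++) {
    Thresholds t = calc_thresholds(green, yellow, i, n_workers, 8.0);
    printf("worker %u: activate at %zu buffers, deactivate at %zu\n",
           i, t.activate, t.deactivate);
  }
  return 0;
}
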
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -27,44 +27,175 @@
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "gc/g1/g1Predictions.hpp"
 #include "runtime/java.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/pair.hpp"
+#include <math.h>
 
-ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictor) :
+// Arbitrary but large limits, to simplify some of the zone calculations.
+// The general idea is to allow expressions like
+//   MIN2(x OP y, max_XXX_zone)
+// without needing to check for overflow in "x OP y", because the
+// ranges for x and y have been restricted.
+STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
+const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
+const size_t max_green_zone = max_yellow_zone / 2;
+const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_queue.
+STATIC_ASSERT(max_yellow_zone <= max_red_zone);
+
+// Range check assertions for green zone values.
+#define assert_zone_constraints_g(green)                        \
+  do {                                                          \
+    size_t azc_g_green = (green);                               \
+    assert(azc_g_green <= max_green_zone,                       \
+           "green exceeds max: " SIZE_FORMAT, azc_g_green);     \
+  } while (0)
+
+// Range check assertions for green and yellow zone values.
+#define assert_zone_constraints_gy(green, yellow)                       \
+  do {                                                                  \
+    size_t azc_gy_green = (green);                                      \
+    size_t azc_gy_yellow = (yellow);                                    \
+    assert_zone_constraints_g(azc_gy_green);                            \
+    assert(azc_gy_yellow <= max_yellow_zone,                            \
+           "yellow exceeds max: " SIZE_FORMAT, azc_gy_yellow);          \
+    assert(azc_gy_green <= azc_gy_yellow,                               \
+           "green (" SIZE_FORMAT ") exceeds yellow (" SIZE_FORMAT ")",  \
+           azc_gy_green, azc_gy_yellow);                                \
+  } while (0)
+
+// Range check assertions for green, yellow, and red zone values.
+#define assert_zone_constraints_gyr(green, yellow, red)                 \
+  do {                                                                  \
+    size_t azc_gyr_green = (green);                                     \
+    size_t azc_gyr_yellow = (yellow);                                   \
+    size_t azc_gyr_red = (red);                                         \
+    assert_zone_constraints_gy(azc_gyr_green, azc_gyr_yellow);          \
+    assert(azc_gyr_red <= max_red_zone,                                 \
+           "red exceeds max: " SIZE_FORMAT, azc_gyr_red);               \
+    assert(azc_gyr_yellow <= azc_gyr_red,                               \
+           "yellow (" SIZE_FORMAT ") exceeds red (" SIZE_FORMAT ")",    \
+           azc_gyr_yellow, azc_gyr_red);                                \
+  } while (0)
+
+// Logging tag sequence for refinement control updates.
+#define CTRL_TAGS gc, ergo, refine
+
+// For logging zone values, ensuring consistency of level and tags.
+#define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
+
+// Package for pair of refinement thread activation and deactivation
+// thresholds.  The activation and deactivation levels are resp. the first
+// and second values of the pair.
+typedef Pair<size_t, size_t> Thresholds;
+inline size_t activation_level(const Thresholds& t) { return t.first; }
+inline size_t deactivation_level(const Thresholds& t) { return t.second; }
+
+static Thresholds calc_thresholds(size_t green_zone,
+                                  size_t yellow_zone,
+                                  uint worker_i) {
+  double yellow_size = yellow_zone - green_zone;
+  double step = yellow_size / ConcurrentG1Refine::thread_num();
+  if (worker_i == 0) {
+    // Potentially activate worker 0 more aggressively, to keep
+    // available buffers near green_zone value.  When yellow_size is
+    // large we don't want to allow a full step to accumulate before
+    // doing any processing, as that might lead to significantly more
+    // than green_zone buffers to be processed by update_rs.
+    step = MIN2(step, ParallelGCThreads / 2.0);
+  }
+  size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
+  size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
+  return Thresholds(green_zone + activate_offset,
+                    green_zone + deactivate_offset);
+}
+
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h,
+                                       size_t green_zone,
+                                       size_t yellow_zone,
+                                       size_t red_zone,
+                                       size_t min_yellow_zone_size) :
   _threads(NULL),
   _sample_thread(NULL),
-  _predictor_sigma(predictor->sigma()),
+  _n_worker_threads(thread_num()),
+  _green_zone(green_zone),
+  _yellow_zone(yellow_zone),
+  _red_zone(red_zone),
+  _min_yellow_zone_size(min_yellow_zone_size),
   _hot_card_cache(g1h)
 {
-  // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
-  }
-  set_green_zone(G1ConcRefinementGreenZone);
+  assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
+}
 
-  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
+static size_t calc_min_yellow_zone_size() {
+  size_t step = G1ConcRefinementThresholdStep;
+  uint n_workers = ConcurrentG1Refine::thread_num();
+  if ((max_yellow_zone / step) < n_workers) {
+    return max_yellow_zone;
+  } else {
+    return step * n_workers;
   }
-  set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));
+}
 
-  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
+static size_t calc_init_green_zone() {
+  size_t green = G1ConcRefinementGreenZone;
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    green = ParallelGCThreads;
   }
-  set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
-
+  return MIN2(green, max_green_zone);
 }
 
-ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
-  G1CollectorPolicy* policy = g1h->g1_policy();
-  ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h, &policy->predictor());
+static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
+  size_t config = G1ConcRefinementYellowZone;
+  size_t size = 0;
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    size = green * 2;
+  } else if (green < config) {
+    size = config - green;
+  }
+  size = MAX2(size, min_size);
+  size = MIN2(size, max_yellow_zone);
+  return MIN2(green + size, max_yellow_zone);
+}
+
+static size_t calc_init_red_zone(size_t green, size_t yellow) {
+  size_t size = yellow - green;
+  if (!FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    size_t config = G1ConcRefinementRedZone;
+    if (yellow < config) {
+      size = MAX2(size, config - yellow);
+    }
+  }
+  return MIN2(yellow + size, max_red_zone);
+}
+
+ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h,
+                                               CardTableEntryClosure* refine_closure,
+                                               jint* ecode) {
+  size_t min_yellow_zone_size = calc_min_yellow_zone_size();
+  size_t green_zone = calc_init_green_zone();
+  size_t yellow_zone = calc_init_yellow_zone(green_zone, min_yellow_zone_size);
+  size_t red_zone = calc_init_red_zone(green_zone, yellow_zone);
+
+  LOG_ZONES("Initial Refinement Zones: "
+            "green: " SIZE_FORMAT ", "
+            "yellow: " SIZE_FORMAT ", "
+            "red: " SIZE_FORMAT ", "
+            "min yellow size: " SIZE_FORMAT,
+            green_zone, yellow_zone, red_zone, min_yellow_zone_size);
+
+  ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h,
+                                                    green_zone,
+                                                    yellow_zone,
+                                                    red_zone,
+                                                    min_yellow_zone_size);
+
   if (cg1r == NULL) {
     *ecode = JNI_ENOMEM;
     vm_shutdown_during_initialization("Could not create ConcurrentG1Refine");
     return NULL;
   }
-  cg1r->_n_worker_threads = thread_num();
-
-  cg1r->reset_threshold_step();
 
   cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
   if (cg1r->_threads == NULL) {
@@ -77,7 +208,15 @@
 
   ConcurrentG1RefineThread *next = NULL;
   for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
-    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
+    Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
+    ConcurrentG1RefineThread* t =
+      new ConcurrentG1RefineThread(cg1r,
+                                   next,
+                                   refine_closure,
+                                   worker_id_offset,
+                                   i,
+                                   activation_level(thresholds),
+                                   deactivation_level(thresholds));
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
       *ecode = JNI_ENOMEM;
@@ -101,14 +240,6 @@
   return cg1r;
 }
 
-void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
-    _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
-  } else {
-    _thread_threshold_step = G1ConcRefinementThresholdStep;
-  }
-}
-
 void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
   _hot_card_cache.initialize(card_counts_storage);
 }
@@ -120,10 +251,11 @@
   _sample_thread->stop();
 }
 
-void ConcurrentG1Refine::reinitialize_threads() {
-  reset_threshold_step();
+void ConcurrentG1Refine::update_thread_thresholds() {
   for (uint i = 0; i < _n_worker_threads; i++) {
-    _threads[i]->initialize();
+    Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
+    _threads[i]->update_thresholds(activation_level(thresholds),
+                                   deactivation_level(thresholds));
   }
 }
 
@@ -142,7 +274,7 @@
 }
 
 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
-  for (uint i = 0; i < worker_thread_num(); i++) {
+  for (uint i = 0; i < _n_worker_threads; i++) {
     tc->do_thread(_threads[i]);
   }
 }
@@ -160,34 +292,80 @@
   st->cr();
 }
 
+static size_t calc_new_green_zone(size_t green,
+                                  double update_rs_time,
+                                  size_t update_rs_processed_buffers,
+                                  double goal_ms) {
+  // Adjust green zone based on whether we're meeting the time goal.
+  // Limit to max_green_zone.
+  const double inc_k = 1.1, dec_k = 0.9;
+  if (update_rs_time > goal_ms) {
+    if (green > 0) {
+      green = static_cast<size_t>(green * dec_k);
+    }
+  } else if (update_rs_time < goal_ms &&
+             update_rs_processed_buffers > green) {
+    green = static_cast<size_t>(MAX2(green * inc_k, green + 1.0));
+    green = MIN2(green, max_green_zone);
+  }
+  return green;
+}
+
+static size_t calc_new_yellow_zone(size_t green, size_t min_yellow_size) {
+  size_t size = green * 2;
+  size = MAX2(size, min_yellow_size);
+  return MIN2(green + size, max_yellow_zone);
+}
+
+static size_t calc_new_red_zone(size_t green, size_t yellow) {
+  return MIN2(yellow + (yellow - green), max_red_zone);
+}
+
+void ConcurrentG1Refine::update_zones(double update_rs_time,
+                                      size_t update_rs_processed_buffers,
+                                      double goal_ms) {
+  log_trace( CTRL_TAGS )("Updating Refinement Zones: "
+                         "update_rs time: %.3fms, "
+                         "update_rs buffers: " SIZE_FORMAT ", "
+                         "update_rs goal time: %.3fms",
+                         update_rs_time,
+                         update_rs_processed_buffers,
+                         goal_ms);
+
+  _green_zone = calc_new_green_zone(_green_zone,
+                                    update_rs_time,
+                                    update_rs_processed_buffers,
+                                    goal_ms);
+  _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
+  _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);
+
+  assert_zone_constraints_gyr(_green_zone, _yellow_zone, _red_zone);
+  LOG_ZONES("Updated Refinement Zones: "
+            "green: " SIZE_FORMAT ", "
+            "yellow: " SIZE_FORMAT ", "
+            "red: " SIZE_FORMAT,
+            _green_zone, _yellow_zone, _red_zone);
+}
+
 void ConcurrentG1Refine::adjust(double update_rs_time,
-                                double update_rs_processed_buffers,
+                                size_t update_rs_processed_buffers,
                                 double goal_ms) {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 
   if (G1UseAdaptiveConcRefinement) {
-    const int k_gy = 3, k_gr = 6;
-    const double inc_k = 1.1, dec_k = 0.9;
+    update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
+    update_thread_thresholds();
 
-    size_t g = green_zone();
-    if (update_rs_time > goal_ms) {
-      g = (size_t)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
+    // Change the barrier params
+    if (_n_worker_threads == 0) {
+      // Disable dcqs notification when there are no threads to notify.
+      dcqs.set_process_completed_threshold(INT_MAX);
     } else {
-      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
-        g = (size_t)MAX2(g * inc_k, g + 1.0);
-      }
+      // Worker 0 is the primary; wakeup is via dcqs notification.
+      STATIC_ASSERT(max_yellow_zone <= INT_MAX);
+      size_t activate = _threads[0]->activation_threshold();
+      dcqs.set_process_completed_threshold((int)activate);
     }
-    // Change the refinement threads params
-    set_green_zone(g);
-    set_yellow_zone(g * k_gy);
-    set_red_zone(g * k_gr);
-    reinitialize_threads();
-
-    size_t processing_threshold_delta = MAX2<size_t>(green_zone() * _predictor_sigma, 1);
-    size_t processing_threshold = MIN2(green_zone() + processing_threshold_delta,
-                                    yellow_zone());
-    // Change the barrier params
-    dcqs.set_process_completed_threshold((int)processing_threshold);
     dcqs.set_max_completed_queue((int)red_zone());
   }
 
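The zone and threshold arithmetic introduced above is terse, so a worked example may help. The following standalone C++ sketch (an illustration only, not part of this changeset) mirrors the calc_thresholds() logic under assumed values: green_zone = 6, yellow_zone = 30, four refinement worker threads, and ParallelGCThreads = 4, which clamps worker 0's step to 2.0.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const size_t green = 6, yellow = 30;        // assumed zone values
  const unsigned n_workers = 4;               // assumed refinement thread count
  const double parallel_gc_threads = 4.0;     // assumed ParallelGCThreads
  for (unsigned i = 0; i < n_workers; i++) {
    double step = double(yellow - green) / n_workers;    // 24 / 4 = 6.0
    if (i == 0) {
      // Worker 0 is clamped so that it activates close to the green zone.
      step = std::min(step, parallel_gc_threads / 2.0);  // 2.0
    }
    size_t activate   = green + (size_t)std::ceil(step * (i + 1));
    size_t deactivate = green + (size_t)std::floor(step * i);
    std::printf("worker %u: activate at %zu buffers, deactivate at %zu\n",
                i, activate, deactivate);
  }
  // Prints 8/6, 18/12, 24/18 and 30/24 for workers 0 through 3.
  return 0;
}

With these assumptions the activation thresholds 8, 18, 24 and 30 are spread across the yellow zone, while worker 0 wakes up as soon as a couple of buffers accumulate above the green zone and deactivates again at the green zone itself.
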
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -65,18 +65,24 @@
   size_t _green_zone;
   size_t _yellow_zone;
   size_t _red_zone;
-
-  size_t _thread_threshold_step;
-
-  double _predictor_sigma;
+  size_t _min_yellow_zone_size;
 
   // We delay the refinement of 'hot' cards using the hot card cache.
   G1HotCardCache _hot_card_cache;
 
-  // Reset the threshold step value based of the current zone boundaries.
-  void reset_threshold_step();
+  ConcurrentG1Refine(G1CollectedHeap* g1h,
+                     size_t green_zone,
+                     size_t yellow_zone,
+                     size_t red_zone,
+                     size_t min_yellow_zone_size);
 
-  ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictions);
+  // Update green/yellow/red zone values based on how well goals are being met.
+  void update_zones(double update_rs_time,
+                    size_t update_rs_processed_buffers,
+                    double goal_ms);
+
+  // Update thread thresholds to account for updated zone values.
+  void update_thread_thresholds();
 
  public:
   ~ConcurrentG1Refine();
@@ -88,9 +94,7 @@
   void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
-  void adjust(double update_rs_time, double update_rs_processed_buffers, double goal_ms);
-
-  void reinitialize_threads();
+  void adjust(double update_rs_time, size_t update_rs_processed_buffers, double goal_ms);
 
   // Iterate over all concurrent refinement threads
   void threads_do(ThreadClosure *tc);
@@ -105,18 +109,10 @@
 
   void print_worker_threads_on(outputStream* st) const;
 
-  void set_green_zone(size_t x)  { _green_zone = x;  }
-  void set_yellow_zone(size_t x) { _yellow_zone = x; }
-  void set_red_zone(size_t x)    { _red_zone = x;    }
-
   size_t green_zone() const      { return _green_zone;  }
   size_t yellow_zone() const     { return _yellow_zone; }
   size_t red_zone() const        { return _red_zone;    }
 
-  uint worker_thread_num() const { return _n_worker_threads; }
-
-  size_t thread_threshold_step() const { return _thread_threshold_step; }
-
   G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
 
   static bool hot_card_cache_enabled() { return G1HotCardCache::default_use_cache(); }
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -26,7 +26,6 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
@@ -36,7 +35,8 @@
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
                          CardTableEntryClosure* refine_closure,
-                         uint worker_id_offset, uint worker_id) :
+                         uint worker_id_offset, uint worker_id,
+                         size_t activate, size_t deactivate) :
   ConcurrentGCThread(),
   _refine_closure(refine_closure),
   _worker_id_offset(worker_id_offset),
@@ -45,7 +45,9 @@
   _next(next),
   _monitor(NULL),
   _cg1r(cg1r),
-  _vtime_accum(0.0)
+  _vtime_accum(0.0),
+  _activation_threshold(activate),
+  _deactivation_threshold(deactivate)
 {
 
   // Each thread has its own monitor. The i-th thread is responsible for signaling
@@ -58,21 +60,17 @@
   } else {
     _monitor = DirtyCardQ_CBL_mon;
   }
-  initialize();
 
   // set name
   set_name("G1 Refine#%d", worker_id);
   create_and_start();
 }
 
-void ConcurrentG1RefineThread::initialize() {
-  // Current thread activation threshold
-  _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
-                    cg1r()->yellow_zone());
-  // A thread deactivates once the number of buffer reached a deactivation threshold
-   _deactivation_threshold =
-     MAX2(_threshold - MIN2(_threshold, cg1r()->thread_threshold_step()),
-          cg1r()->green_zone());
+void ConcurrentG1RefineThread::update_thresholds(size_t activate,
+                                                 size_t deactivate) {
+  assert(deactivate < activate, "precondition");
+  _activation_threshold = activate;
+  _deactivation_threshold = deactivate;
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@@ -118,9 +116,10 @@
       break;
     }
 
+    size_t buffers_processed = 0;
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
     log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
-                          _worker_id, _threshold, dcqs.completed_buffers_num());
+                          _worker_id, _activation_threshold, dcqs.completed_buffers_num());
 
     {
       SuspendibleThreadSetJoiner sts_join;
@@ -139,7 +138,9 @@
         }
 
         // Check if we need to activate the next thread.
-        if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
+        if ((_next != NULL) &&
+            !_next->is_active() &&
+            (curr_buffer_num > _next->_activation_threshold)) {
           _next->activate();
         }
 
@@ -150,14 +151,16 @@
                                                     false /* during_pause */)) {
           break; // Deactivate, number of buffers fell below threshold.
         }
+        ++buffers_processed;
       }
     }
 
     deactivate();
     log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
-                          ", current: " SIZE_FORMAT,
+                          ", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
                           _worker_id, _deactivation_threshold,
-                          dcqs.completed_buffers_num());
+                          dcqs.completed_buffers_num(),
+                          buffers_processed);
 
     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -53,10 +53,8 @@
   // The closure applied to completed log buffers.
   CardTableEntryClosure* _refine_closure;
 
-  size_t _thread_threshold_step;
-  // This thread activation threshold
-  size_t _threshold;
-  // This thread deactivation threshold
+  // This thread's activation/deactivation thresholds
+  size_t _activation_threshold;
   size_t _deactivation_threshold;
 
   void wait_for_completed_buffers();
@@ -75,9 +73,11 @@
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
                            CardTableEntryClosure* refine_closure,
-                           uint worker_id_offset, uint worker_id);
+                           uint worker_id_offset, uint worker_id,
+                           size_t activate, size_t deactivate);
 
-  void initialize();
+  void update_thresholds(size_t activate, size_t deactivate);
+  size_t activation_threshold() const { return _activation_threshold; }
 
   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -27,9 +27,9 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcId.hpp"
@@ -80,7 +80,7 @@
 };
 
 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
-void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
+void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
   const G1Analytics* analytics = g1_policy->analytics();
   if (g1_policy->adaptive_young_list_length()) {
     double now = os::elapsedTime();
@@ -111,7 +111,7 @@
   _vtime_start = os::elapsedVTime();
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1_policy = g1h->g1_policy();
+  G1Policy* g1_policy = g1h->g1_policy();
 
   while (!should_terminate()) {
     // wait until started is set.
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -31,7 +31,7 @@
 // as well as handling various marking cleanup.
 
 class G1ConcurrentMark;
-class G1CollectorPolicy;
+class G1Policy;
 
 class ConcurrentMarkThread: public ConcurrentGCThread {
   friend class VMStructs;
@@ -51,7 +51,7 @@
   volatile State _state;
 
   void sleepBeforeNextCycle();
-  void delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark);
+  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
 
   void run_service();
   void stop_service();
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -27,7 +27,6 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1CardCounts.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CardCounts.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,8 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CardCounts.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/copy.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CardLiveData.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/workgroup.hpp"
+#include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/globals.hpp"
@@ -38,6 +39,7 @@
 G1CardLiveData::G1CardLiveData() :
   _max_capacity(0),
   _cards_per_region(0),
+  _gc_timestamp_at_create(0),
   _live_regions(NULL),
   _live_regions_size_in_bits(0),
   _live_cards(NULL),
@@ -127,6 +129,13 @@
   // lots of work most of the time.
   BitMap::idx_t _last_marked_bit_idx;
 
+  void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
+    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
+    BitMap::idx_t end_idx = card_live_bitmap_index_for((HeapWord*)align_ptr_up(end, CardTableModRefBS::card_size));
+
+    _card_bm.clear_range(start_idx, end_idx);
+  }
+
   // Mark the card liveness bitmap for the object spanning from start to end.
   void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
@@ -169,6 +178,10 @@
     _region_bm.par_set_bit(hr->hrm_index());
   }
 
+  void reset_live_data(HeapRegion* hr) {
+    clear_card_bitmap_range(hr->next_top_at_mark_start(), hr->end());
+  }
+
   // Mark the range of bits covered by allocations done since the last marking
   // in the given heap region, i.e. from NTAMS to top of the given region.
   // Returns if there has been some allocation in this region since the last marking.
@@ -305,6 +318,8 @@
 };
 
 void G1CardLiveData::create(WorkGang* workers, G1CMBitMap* mark_bitmap) {
+  _gc_timestamp_at_create = G1CollectedHeap::heap()->get_gc_time_stamp();
+
   uint n_workers = workers->active_workers();
 
   G1CreateCardLiveDataTask cl(mark_bitmap,
@@ -322,14 +337,24 @@
   class G1FinalizeCardLiveDataClosure: public HeapRegionClosure {
   private:
     G1CardLiveDataHelper _helper;
+
+    uint _gc_timestamp_at_create;
+
+    bool has_been_reclaimed(HeapRegion* hr) const {
+      return hr->get_gc_time_stamp() > _gc_timestamp_at_create;
+    }
   public:
     G1FinalizeCardLiveDataClosure(G1CollectedHeap* g1h,
                                   G1CMBitMap* bitmap,
                                   G1CardLiveData* live_data) :
       HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()) { }
+      _helper(live_data, g1h->reserved_region().start()),
+      _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { }
 
     bool doHeapRegion(HeapRegion* hr) {
+      if (has_been_reclaimed(hr)) {
+        _helper.reset_live_data(hr);
+      }
       bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
       if (allocated_since_marking || hr->next_marked_bytes() > 0) {
         _helper.set_bit_for_region(hr);
@@ -459,27 +484,26 @@
       // Verify the marked bytes for this region.
 
       if (exp_marked_bytes != act_marked_bytes) {
+        log_error(gc)("Expected marked bytes " SIZE_FORMAT " != actual marked bytes " SIZE_FORMAT " in region %u", exp_marked_bytes, act_marked_bytes, hr->hrm_index());
         failures += 1;
       } else if (exp_marked_bytes > HeapRegion::GrainBytes) {
+        log_error(gc)("Expected marked bytes " SIZE_FORMAT " larger than possible " SIZE_FORMAT " in region %u", exp_marked_bytes, HeapRegion::GrainBytes, hr->hrm_index());
         failures += 1;
       }
 
       // Verify the bit, for this region, in the actual and expected
       // (which was just calculated) region bit maps.
-      // We're not OK if the bit in the calculated expected region
-      // bitmap is set and the bit in the actual region bitmap is not.
       uint index = hr->hrm_index();
 
       bool expected = _exp_live_data->is_region_live(index);
       bool actual = _act_live_data->is_region_live(index);
-      if (expected && !actual) {
+      if (expected != actual) {
+        log_error(gc)("Expected liveness %d not equal actual %d in region %u", expected, actual, hr->hrm_index());
         failures += 1;
       }
 
       // Verify that the card bit maps for the cards spanned by the current
-      // region match. We have an error if we have a set bit in the expected
-      // bit map and the corresponding bit in the actual bitmap is not set.
-
+      // region match.
       BitMap::idx_t start_idx = _helper.card_live_bitmap_index_for(hr->bottom());
       BitMap::idx_t end_idx = _helper.card_live_bitmap_index_for(hr->top());
 
@@ -487,7 +511,8 @@
         expected = _exp_live_data->is_card_live_at(i);
         actual = _act_live_data->is_card_live_at(i);
 
-        if (expected && !actual) {
+        if (expected != actual) {
+          log_error(gc)("Expected card liveness %d not equal actual card liveness %d at card " SIZE_FORMAT " in region %u", expected, actual, i, hr->hrm_index());
           failures += 1;
         }
       }
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -46,6 +46,17 @@
   size_t _max_capacity;
   size_t _cards_per_region;
 
+  // Regions may be reclaimed while concurrently creating live data (e.g. due to humongous
+  // eager reclaim). This results in wrong live data for these regions at the end.
+  // So we need to detect such regions and completely recreate their information
+  // during live data finalization.
+  // This _gc_timestamp_at_create tracks the global timestamp when live data creation
+  // has started. Any regions with a higher time stamp have been cleared after that
+  // point in time, and need re-finalization.
+  // Unsynchronized access to this variable is okay, since this value is only set during a
+  // concurrent phase, and read only at the Cleanup safepoint. I.e. there is always
+  // full memory synchronization in between.
+  uint _gc_timestamp_at_create;
   // The per-card liveness bitmap.
   bm_word_t* _live_cards;
   size_t _live_cards_size_in_bits;
@@ -69,6 +80,8 @@
   size_t live_region_bitmap_size_in_bits() const;
   size_t live_card_bitmap_size_in_bits() const;
 public:
+  uint gc_timestamp_at_create() const { return _gc_timestamp_at_create; }
+
   inline bool is_region_live(uint region) const;
 
   inline void remove_nonlive_cards(uint region, BitMap* bm);
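The comment above on _gc_timestamp_at_create describes the staleness detection used during live data finalization. Below is a simplified, self-contained sketch (an illustration only; the Region type and its fields are hypothetical stand-ins, not HotSpot types) of the idea: snapshot the global timestamp when creation starts, and at finalization reset any region whose own timestamp has moved past that snapshot.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for a heap region: only the timestamp matters here.
struct Region {
  unsigned gc_time_stamp;
  bool     live_data_valid;
};

int main() {
  std::vector<Region> regions = { {3, true}, {3, true}, {3, true} };

  // Live data creation starts: remember the global timestamp at that point.
  unsigned gc_timestamp_at_create = 3;

  // Concurrently, region 1 is reclaimed and reused; its timestamp advances.
  regions[1].gc_time_stamp = 4;

  // Finalization: a region whose timestamp moved past the snapshot was
  // reclaimed while live data was being built, so its live data is reset.
  for (size_t i = 0; i < regions.size(); i++) {
    if (regions[i].gc_time_stamp > gc_timestamp_at_create) {
      regions[i].live_data_valid = false;  // would clear the card bitmap range
    }
    std::printf("region %zu: live data %s\n", i,
                regions[i].live_data_valid ? "kept" : "reset");
  }
  return 0;
}
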
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -45,6 +45,7 @@
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/g1RootClosures.hpp"
@@ -97,7 +98,7 @@
   RefineCardTableEntryClosure() : _concurrent(true) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
+    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, NULL);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
     // that point into the collection set.
@@ -1744,10 +1745,11 @@
 
 // Public methods.
 
-G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
+G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   CollectedHeap(),
-  _g1_policy(policy_),
-  _collection_set(this),
+  _collector_policy(collector_policy),
+  _g1_policy(create_g1_policy()),
+  _collection_set(this, _g1_policy),
   _dirty_card_queue_set(false),
   _is_alive_closure_cm(this),
   _is_alive_closure_stw(this),
@@ -2134,7 +2136,7 @@
 }
 
 CollectorPolicy* G1CollectedHeap::collector_policy() const {
-  return g1_policy();
+  return _collector_policy;
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -3088,28 +3090,6 @@
     }
 };
 
-#ifdef ASSERT
-class VerifyCSetClosure: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* hr) {
-    // Here we check that the CSet region's RSet is ready for parallel
-    // iteration. The fields that we'll verify are only manipulated
-    // when the region is part of a CSet and is collected. Afterwards,
-    // we reset these fields when we clear the region's RSet (when the
-    // region is freed) so they are ready when the region is
-    // re-allocated. The only exception to this is if there's an
-    // evacuation failure and instead of freeing the region we leave
-    // it in the heap. In that case, we reset these fields during
-    // evacuation failure handling.
-    guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
-
-    // Here's a good place to add any other checks we'd like to
-    // perform on CSet regions.
-    return false;
-  }
-};
-#endif // ASSERT
-
 uint G1CollectedHeap::num_task_queues() const {
   return _task_queues->size();
 }
@@ -3352,11 +3332,6 @@
           }
         }
 
-#ifdef ASSERT
-        VerifyCSetClosure cl;
-        collection_set_iterate(&cl);
-#endif // ASSERT
-
         // Initialize the GC alloc regions.
         _allocator->init_gc_alloc_regions(evacuation_info);
 
@@ -4859,7 +4834,7 @@
   // head and length, and unlink any young regions in the code below
   _young_list->clear();
 
-  G1CollectorPolicy* policy = g1_policy();
+  G1Policy* policy = g1_policy();
 
   double start_sec = os::elapsedTime();
   bool non_young = true;
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -68,6 +68,7 @@
 class Space;
 class G1CollectionSet;
 class G1CollectorPolicy;
+class G1Policy;
 class G1RemSet;
 class HeapRegionRemSetIterator;
 class G1ConcurrentMark;
@@ -137,6 +138,7 @@
 
 private:
   WorkGang* _workers;
+  G1CollectorPolicy* _collector_policy;
 
   static size_t _humongous_object_threshold_in_words;
 
@@ -243,7 +245,7 @@
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
-  volatile unsigned _gc_time_stamp;
+  volatile uint _gc_time_stamp;
 
   G1HRPrinter _hr_printer;
 
@@ -290,6 +292,8 @@
                                                          size_t size,
                                                          size_t translation_factor);
 
+  static G1Policy* create_g1_policy();
+
   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
   void process_weak_jni_handles();
@@ -360,7 +364,7 @@
   YoungList*  _young_list;
 
   // The current policy object for the collector.
-  G1CollectorPolicy* _g1_policy;
+  G1Policy* _g1_policy;
   G1HeapSizingPolicy* _heap_sizing_policy;
 
   G1CollectionSet _collection_set;
@@ -979,7 +983,7 @@
   G1CollectorState* collector_state() { return &_collector_state; }
 
   // The current policy object for the collector.
-  G1CollectorPolicy* g1_policy() const { return _g1_policy; }
+  G1Policy* g1_policy() const { return _g1_policy; }
 
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
@@ -995,7 +999,7 @@
   // Try to minimize the remembered set.
   void scrub_rem_set();
 
-  unsigned get_gc_time_stamp() {
+  uint get_gc_time_stamp() {
     return _gc_time_stamp;
   }
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -26,7 +26,6 @@
 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 
 #include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -38,3 +38,7 @@
                                              MemRegion mr) {
   return new HeapRegion(hrs_index, bot(), mr);
 }
+
+G1Policy* G1CollectedHeap::create_g1_policy() {
+  return new G1Policy;
+}
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,8 +25,8 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.hpp"
@@ -49,9 +49,9 @@
 }
 
 
-G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h) :
+G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
   _g1(g1h),
-  _policy(NULL),
+  _policy(policy),
   _cset_chooser(new CollectionSetChooser()),
   _eden_region_length(0),
   _survivor_region_length(0),
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -31,14 +31,14 @@
 #include "utilities/globalDefinitions.hpp"
 
 class G1CollectedHeap;
-class G1CollectorPolicy;
 class G1CollectorState;
 class G1GCPhaseTimes;
+class G1Policy;
 class HeapRegion;
 
 class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
   G1CollectedHeap* _g1;
-  G1CollectorPolicy* _policy;
+  G1Policy* _policy;
 
   CollectionSetChooser* _cset_chooser;
 
@@ -110,14 +110,9 @@
   double predict_region_elapsed_time_ms(HeapRegion* hr);
 
 public:
-  G1CollectionSet(G1CollectedHeap* g1h);
+  G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
   ~G1CollectionSet();
 
-  void set_policy(G1CollectorPolicy* g1p) {
-    assert(_policy == NULL, "should only initialize once");
-    _policy = g1p;
-  }
-
   CollectionSetChooser* cset_chooser();
 
   void init_region_lengths(uint eden_cset_region_length,
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -23,43 +23,16 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/concurrentG1Refine.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-#include "gc/g1/g1IHOPControl.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
-#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
+#include "runtime/globals.hpp"
 #include "utilities/debug.hpp"
-#include "utilities/pair.hpp"
 
-G1CollectorPolicy::G1CollectorPolicy() :
-  _predictor(G1ConfidencePercent / 100.0),
-  _analytics(new G1Analytics(&_predictor)),
-  _pause_time_target_ms((double) MaxGCPauseMillis),
-  _rs_lengths_prediction(0),
-  _max_survivor_regions(0),
-  _survivors_age_table(true),
-
-  _bytes_allocated_in_old_since_last_gc(0),
-  _ihop_control(NULL),
-  _initial_mark_to_mixed() {
-
-  // SurvRateGroups below must be initialized after the predictor because they
-  // indirectly use it through this object passed to their constructor.
-  _short_lived_surv_rate_group =
-    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
-  _survivor_surv_rate_group =
-    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
+G1CollectorPolicy::G1CollectorPolicy() {
 
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
@@ -75,8 +48,6 @@
   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();
 
-  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
-
   // Below, we might need to calculate the pause time target based on
   // the pause interval. When we do so we are going to give G1 maximum
   // flexibility and allow it to do pauses when it needs to. So, we'll
@@ -113,25 +84,6 @@
   }
   guarantee(GCPauseIntervalMillis >= 1, "Constraint for GCPauseIntervalMillis should guarantee that value is >= 1");
   guarantee(GCPauseIntervalMillis > MaxGCPauseMillis, "Constraint for GCPauseIntervalMillis should guarantee that GCPauseIntervalMillis > MaxGCPauseMillis");
-
-  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
-  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
-  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
-
-  _tenuring_threshold = MaxTenuringThreshold;
-
-
-  guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
-  _reserve_factor = (double) G1ReservePercent / 100.0;
-  // This will be set when the heap is expanded
-  // for the first time during initialization.
-  _reserve_regions = 0;
-
-  _ihop_control = create_ihop_control();
-}
-
-G1CollectorPolicy::~G1CollectorPolicy() {
-  delete _ihop_control;
 }
 
 void G1CollectorPolicy::initialize_alignments() {
@@ -141,16 +93,6 @@
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
 
-G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
-
-void G1CollectorPolicy::post_heap_initialize() {
-  uintx max_regions = G1CollectedHeap::heap()->max_regions();
-  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
-  if (max_young_size != MaxNewSize) {
-    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
-  }
-}
-
 void G1CollectorPolicy::initialize_flags() {
   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
@@ -159,1114 +101,9 @@
   guarantee(SurvivorRatio >= 1, "Range checking for SurvivorRatio should guarantee that value is >= 1");
 
   CollectorPolicy::initialize_flags();
-  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
-}
-
-
-void G1CollectorPolicy::init() {
-  // Set aside an initial future to_space.
-  _g1 = G1CollectedHeap::heap();
-  _collection_set = _g1->collection_set();
-  _collection_set->set_policy(this);
-
-  assert(Heap_lock->owned_by_self(), "Locking discipline.");
-
-  initialize_gc_policy_counters();
-
-  if (adaptive_young_list_length()) {
-    _young_list_fixed_length = 0;
-  } else {
-    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
-  }
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
-
-  update_young_list_max_and_target_length();
-  // We may immediately start allocating regions and placing them on the
-  // collection set list. Initialize the per-collection set info
-  _collection_set->start_incremental_building();
-}
-
-void G1CollectorPolicy::note_gc_start() {
-  phase_times()->note_gc_start();
 }
 
 // Create the jstat counters for the policy.
 void G1CollectorPolicy::initialize_gc_policy_counters() {
   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 }
-
-bool G1CollectorPolicy::predict_will_fit(uint young_length,
-                                         double base_time_ms,
-                                         uint base_free_regions,
-                                         double target_pause_time_ms) const {
-  if (young_length >= base_free_regions) {
-    // end condition 1: not enough space for the young regions
-    return false;
-  }
-
-  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
-  size_t bytes_to_copy =
-               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
-                                                                collector_state()->during_concurrent_mark());
-  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
-  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
-  if (pause_time_ms > target_pause_time_ms) {
-    // end condition 2: prediction is over the target pause time
-    return false;
-  }
-
-  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
-
-  // When copying, we will likely need more bytes free than is live in the region.
-  // Add some safety margin to factor in the confidence of our guess, and the
-  // natural expected waste.
-  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
-  // of the calculation: the lower the confidence, the more headroom.
-  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
-  // copying due to anticipated waste in the PLABs.
-  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
-  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
-
-  if (expected_bytes_to_copy > free_bytes) {
-    // end condition 3: out-of-space
-    return false;
-  }
-
-  // success!
-  return true;
-}
-
-void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
-  // re-calculate the necessary reserve
-  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
-  // We use ceiling so that if reserve_regions_d is > 0.0 (but
-  // smaller than 1.0) we'll get 1.
-  _reserve_regions = (uint) ceil(reserve_regions_d);
-
-  _young_gen_sizer->heap_size_changed(new_number_of_regions);
-
-  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
-}
-
-uint G1CollectorPolicy::calculate_young_list_desired_min_length(
-                                                       uint base_min_length) const {
-  uint desired_min_length = 0;
-  if (adaptive_young_list_length()) {
-    if (_analytics->num_alloc_rate_ms() > 3) {
-      double now_sec = os::elapsedTime();
-      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
-      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
-      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
-    } else {
-      // otherwise we don't have enough info to make the prediction
-    }
-  }
-  desired_min_length += base_min_length;
-  // make sure we don't go below any user-defined minimum bound
-  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
-}
-
-uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
-  // Here, we might want to also take into account any additional
-  // constraints (i.e., user-defined minimum bound). Currently, we
-  // effectively don't set this bound.
-  return _young_gen_sizer->max_desired_young_length();
-}
-
-uint G1CollectorPolicy::update_young_list_max_and_target_length() {
-  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
-}
-
-uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
-  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
-  update_max_gc_locker_expansion();
-  return unbounded_target_length;
-}
-
-uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
-  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
-  _young_list_target_length = young_lengths.first;
-  return young_lengths.second;
-}
-
-G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
-  YoungTargetLengths result;
-
-  // Calculate the absolute and desired min bounds first.
-
-  // This is how many young regions we already have (currently: the survivors).
-  const uint base_min_length = _g1->young_list()->survivor_length();
-  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
-  // This is the absolute minimum young length. Ensure that we
-  // will at least have one eden region available for allocation.
-  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
-  // If we shrank the young list target it should not shrink below the current size.
-  desired_min_length = MAX2(desired_min_length, absolute_min_length);
-  // Calculate the absolute and desired max bounds.
-
-  uint desired_max_length = calculate_young_list_desired_max_length();
-
-  uint young_list_target_length = 0;
-  if (adaptive_young_list_length()) {
-    if (collector_state()->gcs_are_young()) {
-      young_list_target_length =
-                        calculate_young_list_target_length(rs_lengths,
-                                                           base_min_length,
-                                                           desired_min_length,
-                                                           desired_max_length);
-    } else {
-      // Don't calculate anything and let the code below bound it to
-      // the desired_min_length, i.e., do the next GC as soon as
-      // possible to maximize how many old regions we can add to it.
-    }
-  } else {
-    // The user asked for a fixed young gen so we'll fix the young gen
-    // whether the next GC is young or mixed.
-    young_list_target_length = _young_list_fixed_length;
-  }
-
-  result.second = young_list_target_length;
-
-  // We will try our best not to "eat" into the reserve.
-  uint absolute_max_length = 0;
-  if (_free_regions_at_end_of_collection > _reserve_regions) {
-    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
-  }
-  if (desired_max_length > absolute_max_length) {
-    desired_max_length = absolute_max_length;
-  }
-
-  // Make sure we don't go over the desired max length, nor under the
-  // desired min length. In case they clash, desired_min_length wins
-  // which is why that test is second.
-  if (young_list_target_length > desired_max_length) {
-    young_list_target_length = desired_max_length;
-  }
-  if (young_list_target_length < desired_min_length) {
-    young_list_target_length = desired_min_length;
-  }
-
-  assert(young_list_target_length > base_min_length,
-         "we should be able to allocate at least one eden region");
-  assert(young_list_target_length >= absolute_min_length, "post-condition");
-
-  result.first = young_list_target_length;
-  return result;
-}
-
-uint
-G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
-                                                     uint base_min_length,
-                                                     uint desired_min_length,
-                                                     uint desired_max_length) const {
-  assert(adaptive_young_list_length(), "pre-condition");
-  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
-
-  // In case some edge-condition makes the desired max length too small...
-  if (desired_max_length <= desired_min_length) {
-    return desired_min_length;
-  }
-
-  // We'll adjust min_young_length and max_young_length not to include
-  // the already allocated young regions (i.e., so they reflect the
-  // min and max eden regions we'll allocate). The base_min_length
-  // will be reflected in the predictions by the
-  // survivor_regions_evac_time prediction.
-  assert(desired_min_length > base_min_length, "invariant");
-  uint min_young_length = desired_min_length - base_min_length;
-  assert(desired_max_length > base_min_length, "invariant");
-  uint max_young_length = desired_max_length - base_min_length;
-
-  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
-  size_t pending_cards = _analytics->predict_pending_cards();
-  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
-  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
-  double base_time_ms =
-    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
-    survivor_regions_evac_time;
-  uint available_free_regions = _free_regions_at_end_of_collection;
-  uint base_free_regions = 0;
-  if (available_free_regions > _reserve_regions) {
-    base_free_regions = available_free_regions - _reserve_regions;
-  }
-
-  // Here, we will make sure that the shortest young length that
-  // makes sense fits within the target pause time.
-
-  if (predict_will_fit(min_young_length, base_time_ms,
-                       base_free_regions, target_pause_time_ms)) {
-    // The shortest young length will fit into the target pause time;
-    // we'll now check whether the absolute maximum number of young
-    // regions will fit in the target pause time. If not, we'll do
-    // a binary search between min_young_length and max_young_length.
-    if (predict_will_fit(max_young_length, base_time_ms,
-                         base_free_regions, target_pause_time_ms)) {
-      // The maximum young length will fit into the target pause time.
-      // We are done so set min young length to the maximum length (as
-      // the result is assumed to be returned in min_young_length).
-      min_young_length = max_young_length;
-    } else {
-      // The maximum possible number of young regions will not fit within
-      // the target pause time so we'll search for the optimal
-      // length. The loop invariants are:
-      //
-      // min_young_length < max_young_length
-      // min_young_length is known to fit into the target pause time
-      // max_young_length is known not to fit into the target pause time
-      //
-      // Going into the loop we know the above hold as we've just
-      // checked them. Every time around the loop we check whether
-      // the middle value between min_young_length and
-      // max_young_length fits into the target pause time. If it
-      // does, it becomes the new min. If it doesn't, it becomes
-      // the new max. This way we maintain the loop invariants.
-
-      assert(min_young_length < max_young_length, "invariant");
-      uint diff = (max_young_length - min_young_length) / 2;
-      while (diff > 0) {
-        uint young_length = min_young_length + diff;
-        if (predict_will_fit(young_length, base_time_ms,
-                             base_free_regions, target_pause_time_ms)) {
-          min_young_length = young_length;
-        } else {
-          max_young_length = young_length;
-        }
-        assert(min_young_length <  max_young_length, "invariant");
-        diff = (max_young_length - min_young_length) / 2;
-      }
-      // The results is min_young_length which, according to the
-      // loop invariants, should fit within the target pause time.
-
-      // These are the post-conditions of the binary search above:
-      assert(min_young_length < max_young_length,
-             "otherwise we should have discovered that max_young_length "
-             "fits into the pause target and not done the binary search");
-      assert(predict_will_fit(min_young_length, base_time_ms,
-                              base_free_regions, target_pause_time_ms),
-             "min_young_length, the result of the binary search, should "
-             "fit into the pause target");
-      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
-                               base_free_regions, target_pause_time_ms),
-             "min_young_length, the result of the binary search, should be "
-             "optimal, so no larger length should fit into the pause target");
-    }
-  } else {
-    // Even the minimum length doesn't fit into the pause time
-    // target, return it as the result nevertheless.
-  }
-  return base_min_length + min_young_length;
-}
-
-double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
-  double survivor_regions_evac_time = 0.0;
-  for (HeapRegion * r = _g1->young_list()->first_survivor_region();
-       r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
-       r = r->get_next_young_region()) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
-  }
-  return survivor_regions_evac_time;
-}
-
-void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
-  guarantee( adaptive_young_list_length(), "should not call this otherwise" );
-
-  if (rs_lengths > _rs_lengths_prediction) {
-    // add 10% to avoid having to recalculate often
-    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
-    update_rs_lengths_prediction(rs_lengths_prediction);
-
-    update_young_list_max_and_target_length(rs_lengths_prediction);
-  }
-}
-
-void G1CollectorPolicy::update_rs_lengths_prediction() {
-  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
-}
-
-void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
-  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
-    _rs_lengths_prediction = prediction;
-  }
-}
-
-#ifndef PRODUCT
-bool G1CollectorPolicy::verify_young_ages() {
-  HeapRegion* head = _g1->young_list()->first_region();
-  return
-    verify_young_ages(head, _short_lived_surv_rate_group);
-  // also call verify_young_ages on any additional surv rate groups
-}
-
-bool
-G1CollectorPolicy::verify_young_ages(HeapRegion* head,
-                                     SurvRateGroup *surv_rate_group) {
-  guarantee( surv_rate_group != NULL, "pre-condition" );
-
-  const char* name = surv_rate_group->name();
-  bool ret = true;
-  int prev_age = -1;
-
-  for (HeapRegion* curr = head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    SurvRateGroup* group = curr->surv_rate_group();
-    if (group == NULL && !curr->is_survivor()) {
-      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
-      ret = false;
-    }
-
-    if (surv_rate_group == group) {
-      int age = curr->age_in_surv_rate_group();
-
-      if (age < 0) {
-        log_error(gc, verify)("## %s: encountered negative age", name);
-        ret = false;
-      }
-
-      if (age <= prev_age) {
-        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
-        ret = false;
-      }
-      prev_age = age;
-    }
-  }
-
-  return ret;
-}
-#endif // PRODUCT
-
-void G1CollectorPolicy::record_full_collection_start() {
-  _full_collection_start_sec = os::elapsedTime();
-  // Release the future to-space so that it is available for compaction into.
-  collector_state()->set_full_collection(true);
-}
-
-void G1CollectorPolicy::record_full_collection_end() {
-  // Consider this like a collection pause for the purposes of allocation
-  // since last pause.
-  double end_sec = os::elapsedTime();
-  double full_gc_time_sec = end_sec - _full_collection_start_sec;
-  double full_gc_time_ms = full_gc_time_sec * 1000.0;
-
-  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
-
-  collector_state()->set_full_collection(false);
-
-  // "Nuke" the heuristics that control the young/mixed GC
-  // transitions and make sure we start with young GCs after the Full GC.
-  collector_state()->set_gcs_are_young(true);
-  collector_state()->set_last_young_gc(false);
-  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
-  collector_state()->set_during_initial_mark_pause(false);
-  collector_state()->set_in_marking_window(false);
-  collector_state()->set_in_marking_window_im(false);
-
-  _short_lived_surv_rate_group->start_adding_regions();
-  // also call this on any additional surv rate groups
-
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
-  // Reset survivors SurvRateGroup.
-  _survivor_surv_rate_group->reset();
-  update_young_list_max_and_target_length();
-  update_rs_lengths_prediction();
-  cset_chooser()->clear();
-
-  _bytes_allocated_in_old_since_last_gc = 0;
-
-  record_pause(FullGC, _full_collection_start_sec, end_sec);
-}
-
-void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
-  // We only need to do this here as the policy will only be applied
-  // to the GC we're about to start, so there is no point in calculating
-  // this every time we calculate / recalculate the target young length.
-  update_survivors_policy();
-
-  assert(_g1->used() == _g1->recalculate_used(),
-         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
-         _g1->used(), _g1->recalculate_used());
-
-  phase_times()->record_cur_collection_start_sec(start_time_sec);
-  _pending_cards = _g1->pending_card_num();
-
-  _collection_set->reset_bytes_used_before();
-  _bytes_copied_during_gc = 0;
-
-  collector_state()->set_last_gc_was_young(false);
-
-  // do that for any other surv rate groups
-  _short_lived_surv_rate_group->stop_adding_regions();
-  _survivors_age_table.clear();
-
-  assert( verify_young_ages(), "region age verification" );
-}
-
-void G1CollectorPolicy::record_concurrent_mark_init_end(double
-                                                   mark_init_elapsed_time_ms) {
-  collector_state()->set_during_marking(true);
-  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
-  collector_state()->set_during_initial_mark_pause(false);
-}
-
-void G1CollectorPolicy::record_concurrent_mark_remark_start() {
-  _mark_remark_start_sec = os::elapsedTime();
-  collector_state()->set_during_marking(false);
-}
-
-void G1CollectorPolicy::record_concurrent_mark_remark_end() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
-  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
-  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
-
-  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
-}
-
-void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
-  _mark_cleanup_start_sec = os::elapsedTime();
-}
-
-void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
-                                                              "skip last young-only gc");
-  collector_state()->set_last_young_gc(should_continue_with_reclaim);
-  // We skip the marking phase.
-  if (!should_continue_with_reclaim) {
-    abort_time_to_mixed_tracking();
-  }
-  collector_state()->set_in_marking_window(false);
-}
-
-double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
-  return phase_times()->average_time_ms(phase);
-}
-
-double G1CollectorPolicy::young_other_time_ms() const {
-  return phase_times()->young_cset_choice_time_ms() +
-         phase_times()->young_free_cset_time_ms();
-}
-
-double G1CollectorPolicy::non_young_other_time_ms() const {
-  return phase_times()->non_young_cset_choice_time_ms() +
-         phase_times()->non_young_free_cset_time_ms();
-}
-
-double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
-  return pause_time_ms -
-         average_time_ms(G1GCPhaseTimes::UpdateRS) -
-         average_time_ms(G1GCPhaseTimes::ScanRS) -
-         average_time_ms(G1GCPhaseTimes::ObjCopy) -
-         average_time_ms(G1GCPhaseTimes::Termination);
-}
-
-double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
-  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
-}
-
-CollectionSetChooser* G1CollectorPolicy::cset_chooser() const {
-  return _collection_set->cset_chooser();
-}
-
-bool G1CollectorPolicy::about_to_start_mixed_phase() const {
-  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
-}
-
-bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
-  if (about_to_start_mixed_phase()) {
-    return false;
-  }
-
-  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
-
-  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
-  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
-  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
-
-  bool result = false;
-  if (marking_request_bytes > marking_initiating_used_threshold) {
-    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
-    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
-                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
-                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
-  }
-
-  return result;
-}
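
The check above reduces to comparing the current old-gen occupancy plus the pending allocation against the IHOP threshold, and only requesting a cycle while still in the young-only phase. A minimal standalone sketch with hypothetical names (illustrative only, not part of the changeset):

#include <cstddef>

bool should_request_concurrent_cycle(size_t non_young_used_bytes,
                                     size_t alloc_request_bytes,
                                     size_t ihop_threshold_bytes,
                                     bool gcs_are_young,
                                     bool last_young_gc) {
  size_t marking_request_bytes = non_young_used_bytes + alloc_request_bytes;
  if (marking_request_bytes <= ihop_threshold_bytes) {
    return false;  // occupancy (plus request) still below the initiating threshold
  }
  // Over the threshold: only request a cycle while in the young-only phase and
  // not already scheduled for the last young-only GC before mixed GCs start.
  return gcs_are_young && !last_young_gc;
}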
-
-// Anything below that is considered to be zero
-#define MIN_TIMER_GRANULARITY 0.0000001
-
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
-  double end_time_sec = os::elapsedTime();
-
-  size_t cur_used_bytes = _g1->used();
-  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
-  bool last_pause_included_initial_mark = false;
-  bool update_stats = !_g1->evacuation_failed();
-
-  NOT_PRODUCT(_short_lived_surv_rate_group->print());
-
-  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
-
-  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
-  if (last_pause_included_initial_mark) {
-    record_concurrent_mark_init_end(0.0);
-  } else {
-    maybe_start_marking();
-  }
-
-  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
-  if (app_time_ms < MIN_TIMER_GRANULARITY) {
-    // This usually happens due to the timer not having the required
-    // granularity. Some Linuxes are the usual culprits.
-    // We'll just set it to something (arbitrarily) small.
-    app_time_ms = 1.0;
-  }
-
-  if (update_stats) {
-    // We maintain the invariant that all objects allocated by mutator
-    // threads will be allocated out of eden regions. So, we can use
-    // the eden region number allocated since the previous GC to
-    // calculate the application's allocate rate. The only exception
-    // to that is humongous objects that are allocated separately. But
-    // given that humongous object allocations do not really affect
-    // either the pause's duration or when the next pause will take
-    // place, we can safely ignore them here.
-    uint regions_allocated = _collection_set->eden_region_length();
-    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
-    _analytics->report_alloc_rate_ms(alloc_rate_ms);
-
-    double interval_ms =
-      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
-    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
-    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
-  }
-
-  bool new_in_marking_window = collector_state()->in_marking_window();
-  bool new_in_marking_window_im = false;
-  if (last_pause_included_initial_mark) {
-    new_in_marking_window = true;
-    new_in_marking_window_im = true;
-  }
-
-  if (collector_state()->last_young_gc()) {
-    // This is supposed to be the "last young GC" before we start
-    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
-    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
-
-    if (next_gc_should_be_mixed("start mixed GCs",
-                                "do not start mixed GCs")) {
-      collector_state()->set_gcs_are_young(false);
-    } else {
-      // We aborted the mixed GC phase early.
-      abort_time_to_mixed_tracking();
-    }
-
-    collector_state()->set_last_young_gc(false);
-  }
-
-  if (!collector_state()->last_gc_was_young()) {
-    // This is a mixed GC. Here we decide whether to continue doing
-    // mixed GCs or not.
-    if (!next_gc_should_be_mixed("continue mixed GCs",
-                                 "do not continue mixed GCs")) {
-      collector_state()->set_gcs_are_young(true);
-
-      maybe_start_marking();
-    }
-  }
-
-  _short_lived_surv_rate_group->start_adding_regions();
-  // Do that for any other surv rate groups
-
-  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
-
-  if (update_stats) {
-    double cost_per_card_ms = 0.0;
-    if (_pending_cards > 0) {
-      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
-      _analytics->report_cost_per_card_ms(cost_per_card_ms);
-    }
-    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
-
-    double cost_per_entry_ms = 0.0;
-    if (cards_scanned > 10) {
-      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
-    }
-
-    if (_max_rs_lengths > 0) {
-      double cards_per_entry_ratio =
-        (double) cards_scanned / (double) _max_rs_lengths;
-      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
-    }
-
-    // This is defensive. For a while _max_rs_lengths could get
-    // smaller than _recorded_rs_lengths which was causing
-    // rs_length_diff to get very large and mess up the RSet length
-    // predictions. The reason was unsafe concurrent updates to the
-    // _inc_cset_recorded_rs_lengths field which the code below guards
-    // against (see CR 7118202). This bug has now been fixed (see CR
-    // 7119027). However, I'm still worried that
-    // _inc_cset_recorded_rs_lengths might still end up somewhat
-    // inaccurate. The concurrent refinement thread calculates an
-    // RSet's length concurrently with other CR threads updating it
-    // which might cause it to calculate the length incorrectly (if,
-    // say, it's in mid-coarsening). So I'll leave in the defensive
-    // conditional below just in case.
-    size_t rs_length_diff = 0;
-    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
-    if (_max_rs_lengths > recorded_rs_lengths) {
-      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
-    }
-    _analytics->report_rs_length_diff((double) rs_length_diff);
-
-    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
-    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
-    double cost_per_byte_ms = 0.0;
-
-    if (copied_bytes > 0) {
-      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
-    }
-
-    if (_collection_set->young_region_length() > 0) {
-      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
-                                                        _collection_set->young_region_length());
-    }
-
-    if (_collection_set->old_region_length() > 0) {
-      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
-                                                            _collection_set->old_region_length());
-    }
-
-    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
-
-    _analytics->report_pending_cards((double) _pending_cards);
-    _analytics->report_rs_lengths((double) _max_rs_lengths);
-  }
-
-  collector_state()->set_in_marking_window(new_in_marking_window);
-  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
-  // IHOP control wants to know the expected young gen length if it were not
-  // restrained by the heap reserve. Using the actual length would make the
-  // prediction too small and limit the young gen every time
-  // predicted target occupancy.
-  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
-  update_rs_lengths_prediction();
-
-  update_ihop_prediction(app_time_ms / 1000.0,
-                         _bytes_allocated_in_old_since_last_gc,
-                         last_unrestrained_young_length * HeapRegion::GrainBytes);
-  _bytes_allocated_in_old_since_last_gc = 0;
-
-  _ihop_control->send_trace_event(_g1->gc_tracer_stw());
-
-  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
-  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
-
-  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
-    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
-                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
-                                update_rs_time_goal_ms, scan_hcc_time_ms);
-
-    update_rs_time_goal_ms = 0;
-  } else {
-    update_rs_time_goal_ms -= scan_hcc_time_ms;
-  }
-  _g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
-                                      phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
-                                      update_rs_time_goal_ms);
-
-  cset_chooser()->verify();
-}
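
The update_stats block above turns each measured pause into per-unit costs that the prediction code consumes later. Roughly, in standalone form with hypothetical names (illustrative only, not part of the changeset):

#include <cstddef>

struct PauseSample {
  double update_rs_ms;   // Update RS time minus Scan HCC time
  double scan_rs_ms;     // Scan RS time
  double obj_copy_ms;    // Object Copy time
  size_t pending_cards;
  size_t cards_scanned;
  size_t bytes_copied;
};

struct UnitCosts {
  double cost_per_card_ms;
  double cost_per_entry_ms;
  double cost_per_byte_ms;
};

// Each cost is the measured phase time divided by the amount of work done,
// guarded against empty samples just like the code above.
UnitCosts derive_unit_costs(const PauseSample& s) {
  UnitCosts c = {0.0, 0.0, 0.0};
  if (s.pending_cards > 0) c.cost_per_card_ms  = s.update_rs_ms / (double) s.pending_cards;
  if (s.cards_scanned > 0) c.cost_per_entry_ms = s.scan_rs_ms   / (double) s.cards_scanned;
  if (s.bytes_copied  > 0) c.cost_per_byte_ms  = s.obj_copy_ms  / (double) s.bytes_copied;
  return c;
}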
-
-G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
-  if (G1UseAdaptiveIHOP) {
-    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
-                                     &_predictor,
-                                     G1ReservePercent,
-                                     G1HeapWastePercent);
-  } else {
-    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
-  }
-}
-
-void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
-                                               size_t mutator_alloc_bytes,
-                                               size_t young_gen_size) {
-  // Always try to update IHOP prediction. Even evacuation failures give information
-  // about e.g. whether to start IHOP earlier next time.
-
-  // Avoid using really small application times that might create samples with
-  // very high or very low values. They may be caused by e.g. back-to-back gcs.
-  double const min_valid_time = 1e-6;
-
-  bool report = false;
-
-  double marking_to_mixed_time = -1.0;
-  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
-    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
-    assert(marking_to_mixed_time > 0.0,
-           "Initial mark to mixed time must be larger than zero but is %.3f",
-           marking_to_mixed_time);
-    if (marking_to_mixed_time > min_valid_time) {
-      _ihop_control->update_marking_length(marking_to_mixed_time);
-      report = true;
-    }
-  }
-
-  // As an approximation for the young gc promotion rates during marking we use
-  // the rates from all young gcs, not just those that happen during marking. In
-  // many applications there are only a few (if any) young gcs during marking,
-  // which would make a marking-only prediction useless. Using all young gcs
-  // increases the accuracy of the prediction.
-  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
-    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
-    report = true;
-  }
-
-  if (report) {
-    report_ihop_statistics();
-  }
-}
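
The method above feeds the IHOP controller two kinds of samples: mutator allocation behaviour from young-only pauses, and the observed initial-mark-to-first-mixed duration. A compile-only sketch of that split with hypothetical names (illustrative only, not part of the changeset):

#include <cstddef>

struct IhopSamples {
  double allocation_rate_bytes_per_s;  // updated from young-only pauses
  double marking_length_s;             // updated when the first mixed GC follows marking
};

void update_ihop_samples(IhopSamples& out,
                         bool last_gc_was_young,
                         double mutator_time_s,
                         size_t mutator_alloc_bytes,
                         double marking_to_mixed_time_s) {
  const double min_valid_time_s = 1e-6;  // ignore back-to-back GCs with tiny mutator time
  if (!last_gc_was_young && marking_to_mixed_time_s > min_valid_time_s) {
    out.marking_length_s = marking_to_mixed_time_s;
  }
  if (last_gc_was_young && mutator_time_s > min_valid_time_s) {
    out.allocation_rate_bytes_per_s = (double) mutator_alloc_bytes / mutator_time_s;
  }
}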
-
-void G1CollectorPolicy::report_ihop_statistics() {
-  _ihop_control->print();
-}
-
-void G1CollectorPolicy::print_phases() {
-  phase_times()->print();
-}
-
-double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
-  TruncatedSeq* seq = surv_rate_group->get_seq(age);
-  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
-  double pred = _predictor.get_new_prediction(seq);
-  if (pred > 1.0) {
-    pred = 1.0;
-  }
-  return pred;
-}
-
-double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
-  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
-}
-
-double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
-  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
-}
-
-double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
-                                                       size_t scanned_cards) const {
-  return
-    _analytics->predict_rs_update_time_ms(pending_cards) +
-    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
-    _analytics->predict_constant_other_time_ms();
-}
-
-double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
-  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
-  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
-  return predict_base_elapsed_time_ms(pending_cards, card_num);
-}
-
-size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
-  size_t bytes_to_copy;
-  if (hr->is_marked())
-    bytes_to_copy = hr->max_live_bytes();
-  else {
-    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
-    int age = hr->age_in_surv_rate_group();
-    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
-    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
-  }
-  return bytes_to_copy;
-}
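
In plain terms: a marked (old) region is predicted to copy its live bytes, while a young region copies its used bytes scaled by the survival rate predicted for its age. A standalone sketch with hypothetical names (illustrative only, not part of the changeset):

#include <cstddef>

size_t predict_bytes_to_copy_sketch(bool is_marked,
                                    size_t max_live_bytes,
                                    size_t used_bytes,
                                    double predicted_surv_rate) {
  if (is_marked) {
    return max_live_bytes;  // old region: liveness known from marking
  }
  // Young region: scale used bytes by the predicted survival rate,
  // e.g. an 8M eden region at a 0.12 prediction copies roughly 1M.
  return (size_t) ((double) used_bytes * predicted_surv_rate);
}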
-
-double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
-                                                         bool for_young_gc) const {
-  size_t rs_length = hr->rem_set()->occupied();
-  // Predicting the number of cards is based on which type of GC
-  // we're predicting for.
-  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
-  size_t bytes_to_copy = predict_bytes_to_copy(hr);
-
-  double region_elapsed_time_ms =
-    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
-    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
-
-  // The prediction of the "other" time for this region is based
-  // upon the region type and NOT the GC type.
-  if (hr->is_young()) {
-    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
-  } else {
-    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
-  }
-  return region_elapsed_time_ms;
-}
-
-void G1CollectorPolicy::print_yg_surv_rate_info() const {
-#ifndef PRODUCT
-  _short_lived_surv_rate_group->print_surv_rate_summary();
-  // add this call for any other surv rate groups
-#endif // PRODUCT
-}
-
-bool G1CollectorPolicy::is_young_list_full() const {
-  uint young_list_length = _g1->young_list()->length();
-  uint young_list_target_length = _young_list_target_length;
-  return young_list_length >= young_list_target_length;
-}
-
-bool G1CollectorPolicy::can_expand_young_list() const {
-  uint young_list_length = _g1->young_list()->length();
-  uint young_list_max_length = _young_list_max_length;
-  return young_list_length < young_list_max_length;
-}
-
-bool G1CollectorPolicy::adaptive_young_list_length() const {
-  return _young_gen_sizer->adaptive_young_list_length();
-}
-
-void G1CollectorPolicy::update_max_gc_locker_expansion() {
-  uint expansion_region_num = 0;
-  if (GCLockerEdenExpansionPercent > 0) {
-    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
-    double expansion_region_num_d = perc * (double) _young_list_target_length;
-    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
-    // less than 1.0) we'll get 1.
-    expansion_region_num = (uint) ceil(expansion_region_num_d);
-  } else {
-    assert(expansion_region_num == 0, "sanity");
-  }
-  _young_list_max_length = _young_list_target_length + expansion_region_num;
-  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
-}
-
-// Calculates survivor space parameters.
-void G1CollectorPolicy::update_survivors_policy() {
-  double max_survivor_regions_d =
-                 (double) _young_list_target_length / (double) SurvivorRatio;
-  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
-  // smaller than 1.0) we'll get 1.
-  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
-
-  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
-        HeapRegion::GrainWords * _max_survivor_regions, counters());
-}
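
For example, with a young list target of 60 regions and SurvivorRatio=8, this reserves ceil(60 / 8) = 8 survivor regions. A minimal sketch of the sizing with hypothetical names (illustrative only, not part of the changeset):

#include <cmath>

unsigned max_survivor_regions_sketch(unsigned young_list_target_length,
                                     unsigned survivor_ratio) {
  double d = (double) young_list_target_length / (double) survivor_ratio;
  // Ceiling so that any non-zero fraction still yields at least one region.
  return (unsigned) std::ceil(d);
}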
-
-bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
-  // We actually check whether we are marking here and not whether we are in
-  // a reclamation phase. This means that we will schedule a concurrent mark
-  // even while we are still in the process of reclaiming memory.
-  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
-  if (!during_cycle) {
-    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
-    collector_state()->set_initiate_conc_mark_if_possible(true);
-    return true;
-  } else {
-    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
-    return false;
-  }
-}
-
-void G1CollectorPolicy::initiate_conc_mark() {
-  collector_state()->set_during_initial_mark_pause(true);
-  collector_state()->set_initiate_conc_mark_if_possible(false);
-}
-
-void G1CollectorPolicy::decide_on_conc_mark_initiation() {
-  // We are about to decide on whether this pause will be an
-  // initial-mark pause.
-
-  // First, collector_state()->during_initial_mark_pause() should not be already set. We
-  // will set it here if we have to. However, it should be cleared by
-  // the end of the pause (it's only set for the duration of an
-  // initial-mark pause).
-  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
-
-  if (collector_state()->initiate_conc_mark_if_possible()) {
-    // We had noticed on a previous pause that the heap occupancy has
-    // gone over the initiating threshold and we should start a
-    // concurrent marking cycle. So we might initiate one.
-
-    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
-      // Initiate a new initial mark if there is no marking or reclamation going on.
-      initiate_conc_mark();
-      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
-    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
-      // Initiate a user requested initial mark. An initial mark must be young only
-      // GC, so the collector state must be updated to reflect this.
-      collector_state()->set_gcs_are_young(true);
-      collector_state()->set_last_young_gc(false);
-
-      abort_time_to_mixed_tracking();
-      initiate_conc_mark();
-      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
-    } else {
-      // The concurrent marking thread is still finishing up the
-      // previous cycle. If we start one right now the two cycles
-      // overlap. In particular, the concurrent marking thread might
-      // be in the process of clearing the next marking bitmap (which
-      // we will use for the next cycle if we start one). Starting a
-      // cycle now will be bad given that parts of the marking
-      // information might get cleared by the marking thread. And we
-      // cannot wait for the marking thread to finish the cycle as it
-      // periodically yields while clearing the next marking bitmap
-      // and, if it's in a yield point, it's waiting for us to
-      // finish. So, at this point we will not start a cycle and we'll
-      // let the concurrent marking thread complete the last one.
-      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
-    }
-  }
-}
-
-void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
-  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
-
-  double end_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
-  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
-  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
-
-  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
-}
-
-double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
-  // Returns the given amount of reclaimable bytes (that represents
-  // the amount of reclaimable space still to be collected) as a
-  // percentage of the current heap capacity.
-  size_t capacity_bytes = _g1->capacity();
-  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
-}
-
-void G1CollectorPolicy::maybe_start_marking() {
-  if (need_to_start_conc_mark("end of GC")) {
-    // Note: this might have already been set, if during the last
-    // pause we decided to start a cycle but at the beginning of
-    // this pause we decided to postpone it. That's OK.
-    collector_state()->set_initiate_conc_mark_if_possible(true);
-  }
-}
-
-G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
-  assert(!collector_state()->full_collection(), "must be");
-  if (collector_state()->during_initial_mark_pause()) {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
-    return InitialMarkGC;
-  } else if (collector_state()->last_young_gc()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(collector_state()->last_gc_was_young(), "must be");
-    return LastYoungGC;
-  } else if (!collector_state()->last_gc_was_young()) {
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
-    return MixedGC;
-  } else {
-    assert(collector_state()->last_gc_was_young(), "must be");
-    assert(!collector_state()->during_initial_mark_pause(), "must be");
-    assert(!collector_state()->last_young_gc(), "must be");
-    return YoungOnlyGC;
-  }
-}
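
The classification above reads three collector-state bits in a fixed priority order; restated as a standalone decision with hypothetical names (illustrative only, not part of the changeset):

enum PauseKindSketch { InitialMarkPause, LastYoungPause, MixedPause, YoungOnlyPause };

PauseKindSketch classify_young_pause(bool during_initial_mark_pause,
                                     bool last_young_gc,
                                     bool last_gc_was_young) {
  if (during_initial_mark_pause) return InitialMarkPause;  // starts a marking cycle
  if (last_young_gc)             return LastYoungPause;    // final young-only GC before mixed GCs
  if (!last_gc_was_young)        return MixedPause;        // collected old regions too
  return YoungOnlyPause;                                   // ordinary young-only GC
}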
-
-void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
-  // Manage the MMU tracker. For some reason it ignores Full GCs.
-  if (kind != FullGC) {
-    _mmu_tracker->add_pause(start, end);
-  }
-  // Manage the mutator time tracking from initial mark to first mixed gc.
-  switch (kind) {
-    case FullGC:
-      abort_time_to_mixed_tracking();
-      break;
-    case Cleanup:
-    case Remark:
-    case YoungOnlyGC:
-    case LastYoungGC:
-      _initial_mark_to_mixed.add_pause(end - start);
-      break;
-    case InitialMarkGC:
-      _initial_mark_to_mixed.record_initial_mark_end(end);
-      break;
-    case MixedGC:
-      _initial_mark_to_mixed.record_mixed_gc_start(start);
-      break;
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-void G1CollectorPolicy::abort_time_to_mixed_tracking() {
-  _initial_mark_to_mixed.reset();
-}
-
-bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
-                                                const char* false_action_str) const {
-  if (cset_chooser()->is_empty()) {
-    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
-    return false;
-  }
-
-  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
-  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
-  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
-  double threshold = (double) G1HeapWastePercent;
-  if (reclaimable_perc <= threshold) {
-    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
-                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
-    return false;
-  }
-  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
-                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
-  return true;
-}
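
Mixed collections therefore continue only while the space still reclaimable from candidate old regions exceeds G1HeapWastePercent of the committed heap; e.g. 300M reclaimable in a 4G heap is about 7.3%, which passes a 5% threshold. A standalone sketch with hypothetical names (illustrative only, not part of the changeset):

#include <cstddef>

bool next_gc_should_be_mixed_sketch(size_t remaining_reclaimable_bytes,
                                    size_t heap_capacity_bytes,
                                    double heap_waste_percent) {
  if (remaining_reclaimable_bytes == 0) {
    return false;  // no candidate old regions left
  }
  double reclaimable_perc =
    (double) remaining_reclaimable_bytes * 100.0 / (double) heap_capacity_bytes;
  return reclaimable_perc > heap_waste_percent;
}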
-
-uint G1CollectorPolicy::calc_min_old_cset_length() const {
-  // The min old CSet region bound is based on the maximum desired
-  // number of mixed GCs after a cycle. I.e., even if some old regions
-  // look expensive, we should add them to the CSet anyway to make
-  // sure we go through the available old regions in no more than the
-  // maximum desired number of mixed GCs.
-  //
-  // The calculation is based on the number of marked regions we added
-  // to the CSet chooser in the first place, not how many remain, so
-  // that the result is the same during all mixed GCs that follow a cycle.
-
-  const size_t region_num = (size_t) cset_chooser()->length();
-  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
-  size_t result = region_num / gc_num;
-  // emulate ceiling
-  if (result * gc_num < region_num) {
-    result += 1;
-  }
-  return (uint) result;
-}
-
-uint G1CollectorPolicy::calc_max_old_cset_length() const {
-  // The max old CSet region bound is based on the threshold expressed
-  // as a percentage of the heap size. I.e., it should bound the
-  // number of old regions added to the CSet irrespective of how many
-  // of them are available.
-
-  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->num_regions();
-  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
-  size_t result = region_num * perc / 100;
-  // emulate ceiling
-  if (100 * result < region_num * perc) {
-    result += 1;
-  }
-  return (uint) result;
-}
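
Both bounds emulate a ceiling division in integer arithmetic; the same idiom extracted into a hypothetical helper (illustrative only, not part of the changeset):

#include <cstddef>

size_t ceil_div(size_t num, size_t den) {
  size_t result = num / den;
  if (result * den < num) {  // a remainder was truncated, so round up
    result += 1;
  }
  return result;
}
// calc_min_old_cset_length ~ ceil_div(candidate_regions, G1MixedGCCountTarget),
//   e.g. ceil_div(83, 8) == 11, spreading 83 candidates over at most 8 mixed GCs.
// calc_max_old_cset_length ~ ceil_div(num_regions * G1OldCSetRegionThresholdPercent, 100).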
-
-void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
-  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
-  _collection_set->finalize_old_part(time_remaining_ms);
-}
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,419 +25,27 @@
 #ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
 #define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
 
-#include "gc/g1/g1CollectorState.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1InCSetState.hpp"
-#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Predictions.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "utilities/pair.hpp"
 
-// A G1CollectorPolicy makes policy decisions that determine the
-// characteristics of the collector.  Examples include:
-//   * choice of collection set.
-//   * when to collect.
+// G1CollectorPolicy is primarily used during initialization and to expose the
+// functionality of the CollectorPolicy interface to the rest of the VM.
 
-class HeapRegion;
-class G1CollectionSet;
-class CollectionSetChooser;
-class G1IHOPControl;
-class G1Analytics;
 class G1YoungGenSizer;
 
 class G1CollectorPolicy: public CollectorPolicy {
- private:
-  G1IHOPControl* _ihop_control;
-
-  G1IHOPControl* create_ihop_control() const;
-  // Update the IHOP control with necessary statistics.
-  void update_ihop_prediction(double mutator_time_s,
-                              size_t mutator_alloc_bytes,
-                              size_t young_gen_size);
-  void report_ihop_statistics();
-
-  G1Predictions _predictor;
-  G1Analytics* _analytics;
-  G1MMUTracker* _mmu_tracker;
-
+protected:
   void initialize_alignments();
   void initialize_flags();
 
-  double _full_collection_start_sec;
-
-  uint _young_list_target_length;
-  uint _young_list_fixed_length;
-
-  // The max number of regions we can extend the eden by while the GC
-  // locker is active. This should be >= _young_list_target_length;
-  uint _young_list_max_length;
-
-  SurvRateGroup* _short_lived_surv_rate_group;
-  SurvRateGroup* _survivor_surv_rate_group;
-
-  double _reserve_factor;
-  uint   _reserve_regions;
-
-  G1YoungGenSizer* _young_gen_sizer;
-
-  uint _free_regions_at_end_of_collection;
-
-  size_t _max_rs_lengths;
-
-  size_t _rs_lengths_prediction;
-
-#ifndef PRODUCT
-  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
-#endif // PRODUCT
-
-  double _pause_time_target_ms;
-
-  size_t _pending_cards;
-
-  // The amount of allocated bytes in old gen during the last mutator and the following
-  // young GC phase.
-  size_t _bytes_allocated_in_old_since_last_gc;
-
-  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 public:
-  const G1Predictions& predictor() const { return _predictor; }
-  const G1Analytics* analytics()   const { return const_cast<const G1Analytics*>(_analytics); }
-
-  // Add the given number of bytes to the total number of allocated bytes in the old gen.
-  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
-
-  // Accessors
-
-  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
-    hr->set_eden();
-    hr->install_surv_rate_group(_short_lived_surv_rate_group);
-    hr->set_young_index_in_cset(young_index_in_cset);
-  }
-
-  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
-    assert(hr->is_survivor(), "pre-condition");
-    hr->install_surv_rate_group(_survivor_surv_rate_group);
-    hr->set_young_index_in_cset(young_index_in_cset);
-  }
-
-#ifndef PRODUCT
-  bool verify_young_ages();
-#endif // PRODUCT
-
-  void record_max_rs_lengths(size_t rs_lengths) {
-    _max_rs_lengths = rs_lengths;
-  }
-
-
-  double predict_base_elapsed_time_ms(size_t pending_cards) const;
-  double predict_base_elapsed_time_ms(size_t pending_cards,
-                                      size_t scanned_cards) const;
-  size_t predict_bytes_to_copy(HeapRegion* hr) const;
-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
-
-  double predict_survivor_regions_evac_time() const;
-
-  bool should_update_surv_rate_group_predictors() {
-    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
-  }
-
-  void cset_regions_freed() {
-    bool update = should_update_surv_rate_group_predictors();
-
-    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
-    _survivor_surv_rate_group->all_surviving_words_recorded(update);
-  }
-
-  G1MMUTracker* mmu_tracker() {
-    return _mmu_tracker;
-  }
-
-  const G1MMUTracker* mmu_tracker() const {
-    return _mmu_tracker;
-  }
-
-  double max_pause_time_ms() const {
-    return _mmu_tracker->max_gc_time() * 1000.0;
-  }
-
-  // Returns an estimate of the survival rate of the region at yg-age
-  // "yg_age".
-  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
-
-  double predict_yg_surv_rate(int age) const;
-
-  double accum_yg_surv_rate_pred(int age) const;
-
-protected:
-  G1CollectionSet* _collection_set;
-  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
-  virtual double other_time_ms(double pause_time_ms) const;
-
-  double young_other_time_ms() const;
-  double non_young_other_time_ms() const;
-  double constant_other_time_ms(double pause_time_ms) const;
-
-  CollectionSetChooser* cset_chooser() const;
-private:
-
-  // The number of bytes copied during the GC.
-  size_t _bytes_copied_during_gc;
-
-  // Stash a pointer to the g1 heap.
-  G1CollectedHeap* _g1;
-
-  G1GCPhaseTimes* _phase_times;
-
-  // This set of variables tracks the collector efficiency, in order to
-  // determine whether we should initiate a new marking.
-  double _mark_remark_start_sec;
-  double _mark_cleanup_start_sec;
-
-  // Updates the internal young list maximum and target lengths. Returns the
-  // unbounded young list target length.
-  uint update_young_list_max_and_target_length();
-  uint update_young_list_max_and_target_length(size_t rs_lengths);
-
-  // Update the young list target length either by setting it to the
-  // desired fixed value or by calculating it using G1's pause
-  // prediction model. If no rs_lengths parameter is passed, predict
-  // the RS lengths using the prediction model, otherwise use the
-  // given rs_lengths as the prediction.
-  // Returns the unbounded young list target length.
-  uint update_young_list_target_length(size_t rs_lengths);
-
-  // Calculate and return the minimum desired young list target
-  // length. This is the minimum desired young list length according
-  // to the user's inputs.
-  uint calculate_young_list_desired_min_length(uint base_min_length) const;
-
-  // Calculate and return the maximum desired young list target
-  // length. This is the maximum desired young list length according
-  // to the user's inputs.
-  uint calculate_young_list_desired_max_length() const;
-
-  // Calculate and return the maximum young list target length that
-  // can fit into the pause time goal. The parameters are: rs_lengths
-  // represent the prediction of how large the young RSet lengths will
-  // be, base_min_length is the already existing number of regions in
-  // the young list, min_length and max_length are the desired min and
-  // max young list length according to the user's inputs.
-  uint calculate_young_list_target_length(size_t rs_lengths,
-                                          uint base_min_length,
-                                          uint desired_min_length,
-                                          uint desired_max_length) const;
-
-  // Result of the bounded_young_list_target_length() method, containing both the
-  // bounded as well as the unbounded young list target lengths in this order.
-  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
-  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
-
-  void update_rs_lengths_prediction();
-  void update_rs_lengths_prediction(size_t prediction);
-
-  // Check whether a given young length (young_length) fits into the
-  // given target pause time and whether the prediction for the amount
-  // of objects to be copied for the given length will fit into the
-  // given free space (expressed by base_free_regions).  It is used by
-  // calculate_young_list_target_length().
-  bool predict_will_fit(uint young_length, double base_time_ms,
-                        uint base_free_regions, double target_pause_time_ms) const;
-
-public:
-  size_t pending_cards() const { return _pending_cards; }
-
-  // Calculate the minimum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_min_old_cset_length() const;
-
-  // Calculate the maximum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_max_old_cset_length() const;
-
-  // Returns the given amount of uncollected reclaimable space
-  // as a percentage of the current heap capacity.
-  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
-
-private:
-  // Sets up marking if proper conditions are met.
-  void maybe_start_marking();
-
-  // The kind of STW pause.
-  enum PauseKind {
-    FullGC,
-    YoungOnlyGC,
-    MixedGC,
-    LastYoungGC,
-    InitialMarkGC,
-    Cleanup,
-    Remark
-  };
-
-  // Calculate PauseKind from internal state.
-  PauseKind young_gc_pause_kind() const;
-  // Record the given STW pause with the given start and end times (in s).
-  void record_pause(PauseKind kind, double start, double end);
-  // Indicate that we aborted marking before doing any mixed GCs.
-  void abort_time_to_mixed_tracking();
-public:
-
   G1CollectorPolicy();
 
-  virtual ~G1CollectorPolicy();
-
-  virtual G1CollectorPolicy* as_g1_policy() { return this; }
-
-  G1CollectorState* collector_state() const;
-
-  G1GCPhaseTimes* phase_times() const { return _phase_times; }
+  G1CollectorPolicy* as_g1_policy() { return this; }
 
-  // Check the current value of the young list RSet lengths and
-  // compare it against the last prediction. If the current value is
-  // higher, recalculate the young list target length prediction.
-  void revise_young_list_target_length_if_necessary(size_t rs_lengths);
-
-  // This should be called after the heap is resized.
-  void record_new_heap_size(uint new_number_of_regions);
-
-  void init();
-
-  virtual void note_gc_start();
+  void post_heap_initialize() {} // Nothing needed.
 
   // Create jstat counters for the policy.
   virtual void initialize_gc_policy_counters();
-
-  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
-
-  bool about_to_start_mixed_phase() const;
-
-  // Record the start and end of an evacuation pause.
-  void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
-
-  // Record the start and end of a full collection.
-  void record_full_collection_start();
-  void record_full_collection_end();
-
-  // Must currently be called while the world is stopped.
-  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
-
-  // Record start and end of remark.
-  void record_concurrent_mark_remark_start();
-  void record_concurrent_mark_remark_end();
-
-  // Record start, end, and completion of cleanup.
-  void record_concurrent_mark_cleanup_start();
-  void record_concurrent_mark_cleanup_end();
-  void record_concurrent_mark_cleanup_completed();
-
-  virtual void print_phases();
-
-  // Record how much space we copied during a GC. This is typically
-  // called when a GC alloc region is being retired.
-  void record_bytes_copied_during_gc(size_t bytes) {
-    _bytes_copied_during_gc += bytes;
-  }
-
-  // The amount of space we copied during a GC.
-  size_t bytes_copied_during_gc() const {
-    return _bytes_copied_during_gc;
-  }
-
-  // Determine whether there are candidate regions so that the
-  // next GC should be mixed. The two action strings are used
-  // in the ergo output when the method returns true or false.
-  bool next_gc_should_be_mixed(const char* true_action_str,
-                               const char* false_action_str) const;
-
-  virtual void finalize_collection_set(double target_pause_time_ms);
-private:
-  // Set the state to start a concurrent marking cycle and clear
-  // _initiate_conc_mark_if_possible because it has now been
-  // acted on.
-  void initiate_conc_mark();
-
-public:
-  // This sets the initiate_conc_mark_if_possible() flag to start a
-  // new cycle, as long as we are not already in one. It's best if it
-  // is called during a safepoint when the test whether a cycle is in
-  // progress or not is stable.
-  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
-
-  // This is called at the very beginning of an evacuation pause (it
-  // has to be the first thing that the pause does). If
-  // initiate_conc_mark_if_possible() is true, and the concurrent
-  // marking thread has completed its work during the previous cycle,
-  // it will set during_initial_mark_pause() to true so that the pause does
-  // the initial-mark work and start a marking cycle.
-  void decide_on_conc_mark_initiation();
-
-  // Print stats on young survival ratio
-  void print_yg_surv_rate_info() const;
-
-  void finished_recalculating_age_indexes(bool is_survivors) {
-    if (is_survivors) {
-      _survivor_surv_rate_group->finished_recalculating_age_indexes();
-    } else {
-      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
-    }
-  }
-
-  size_t young_list_target_length() const { return _young_list_target_length; }
-
-  bool is_young_list_full() const;
-
-  bool can_expand_young_list() const;
-
-  uint young_list_max_length() const {
-    return _young_list_max_length;
-  }
-
-  bool adaptive_young_list_length() const;
-
-  virtual bool should_process_references() const {
-    return true;
-  }
-
-private:
-  //
-  // Survivor regions policy.
-  //
-
-  // Current tenuring threshold, set to 0 if the collector reaches the
-  // maximum amount of survivors regions.
-  uint _tenuring_threshold;
-
-  // The limit on the number of regions allocated for survivors.
-  uint _max_survivor_regions;
-
-  AgeTable _survivors_age_table;
-
-public:
-  uint tenuring_threshold() const { return _tenuring_threshold; }
-
-  uint max_survivor_regions() {
-    return _max_survivor_regions;
-  }
-
-  void note_start_adding_survivor_regions() {
-    _survivor_surv_rate_group->start_adding_regions();
-  }
-
-  void note_stop_adding_survivor_regions() {
-    _survivor_surv_rate_group->stop_adding_regions();
-  }
-
-  void record_age_table(AgeTable* age_table) {
-    _survivors_age_table.merge(age_table);
-  }
-
-  void update_max_gc_locker_expansion();
-
-  // Calculates survivor space parameters.
-  void update_survivors_policy();
-
-  virtual void post_heap_initialize();
 };
 
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -28,12 +28,12 @@
 #include "code/codeCache.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1CardLiveData.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -739,8 +739,8 @@
 };
 
 void G1ConcurrentMark::checkpointRootsInitialPre() {
-  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1Policy* g1p = g1h->g1_policy();
 
   _has_aborted = false;
 
@@ -1056,7 +1056,7 @@
   }
   g1h->verifier()->check_bitmaps("Remark Start");
 
-  G1CollectorPolicy* g1p = g1h->g1_policy();
+  G1Policy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
@@ -1144,8 +1144,6 @@
     if (hr->is_archive()) {
       return false;
     }
-    // We use a claim value of zero here because all regions
-    // were claimed with value 1 in the FinalCount task.
     _g1->reset_gc_time_stamps(hr);
     hr->note_end_of_marking();
 
@@ -1240,7 +1238,7 @@
   }
   g1h->verifier()->check_bitmaps("Cleanup Start");
 
-  G1CollectorPolicy* g1p = g1h->g1_policy();
+  G1Policy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
@@ -2609,7 +2607,7 @@
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
   assert(concurrent() == _cm->concurrent(), "they should be the same");
 
-  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
+  G1Policy* g1_policy = _g1h->g1_policy();
   assert(_task_queues != NULL, "invariant");
   assert(_task_queue != NULL, "invariant");
   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
--- a/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -227,15 +227,6 @@
                                                during_conc_mark);
         _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 
-        // In the common case (i.e. when there is no evacuation
-        // failure) we make sure that the following is done when
-        // the region is freed so that it is "ready-to-go" when it's
-        // re-allocated. However, when evacuation failure happens, a
-        // region will remain in the heap and might ultimately be added
-        // to a CSet in the future. So we have to be careful here and
-        // make sure the region's RSet is ready for parallel iteration
-        // whenever this might be required in the future.
-        hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
 
         size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
+#include "gc/g1/g1_globals.hpp"
 #include "gc/g1/g1EvacStats.hpp"
 #include "gc/shared/gcId.hpp"
 #include "logging/log.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,8 +24,8 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1HeapTransition.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "logging/log.hpp"
 #include "memory/metaspace.hpp"
 
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -29,6 +29,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,8 +24,8 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
+#include "gc/g1/g1Policy.hpp"
 
 G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
                                            const char* name,
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -27,8 +27,8 @@
 
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1OopClosures.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/shared/ageTable.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Policy.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/concurrentG1Refine.hpp"
+#include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
+#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1IHOPControl.hpp"
+#include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1YoungGenSizer.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/shared/gcPolicyCounters.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/java.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/pair.hpp"
+
+G1Policy::G1Policy() :
+  _predictor(G1ConfidencePercent / 100.0),
+  _analytics(new G1Analytics(&_predictor)),
+  _pause_time_target_ms((double) MaxGCPauseMillis),
+  _rs_lengths_prediction(0),
+  _max_survivor_regions(0),
+  _survivors_age_table(true),
+
+  _bytes_allocated_in_old_since_last_gc(0),
+  _ihop_control(NULL),
+  _initial_mark_to_mixed() {
+
+  // SurvRateGroups below must be initialized after the predictor because they
+  // indirectly use it through this object passed to their constructor.
+  _short_lived_surv_rate_group =
+    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
+  _survivor_surv_rate_group =
+    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
+
+  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
+
+  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
+  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
+  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
+
+  _tenuring_threshold = MaxTenuringThreshold;
+
+  guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
+  _reserve_factor = (double) G1ReservePercent / 100.0;
+  // This will be set when the heap is expanded
+  // for the first time during initialization.
+  _reserve_regions = 0;
+
+  _ihop_control = create_ihop_control();
+}
+
+G1Policy::~G1Policy() {
+  delete _ihop_control;
+}
+
+G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
+
+void G1Policy::init() {
+  // Set aside an initial future to_space.
+  _g1 = G1CollectedHeap::heap();
+  _collection_set = _g1->collection_set();
+
+  assert(Heap_lock->owned_by_self(), "Locking discipline.");
+
+  _g1->collector_policy()->initialize_gc_policy_counters();
+
+  if (adaptive_young_list_length()) {
+    _young_list_fixed_length = 0;
+  } else {
+    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
+  }
+  _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
+
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
+
+  update_young_list_max_and_target_length();
+  // We may immediately start allocating regions and placing them on the
+  // collection set list. Initialize the per-collection set info
+  _collection_set->start_incremental_building();
+}
+
+void G1Policy::note_gc_start() {
+  phase_times()->note_gc_start();
+}
+
+bool G1Policy::predict_will_fit(uint young_length,
+                                double base_time_ms,
+                                uint base_free_regions,
+                                double target_pause_time_ms) const {
+  if (young_length >= base_free_regions) {
+    // end condition 1: not enough space for the young regions
+    return false;
+  }
+
+  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
+  size_t bytes_to_copy =
+               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
+                                                                collector_state()->during_concurrent_mark());
+  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
+  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
+  if (pause_time_ms > target_pause_time_ms) {
+    // end condition 2: prediction is over the target pause time
+    return false;
+  }
+
+  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
+
+  // When copying, we will likely need more bytes free than is live in the region.
+  // Add some safety margin to factor in the confidence of our guess, and the
+  // natural expected waste.
+  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
+  // of the calculation: the lower the confidence, the more headroom.
+  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
+  // copying due to anticipated waste in the PLABs.
+  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
+  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
+
+  if (expected_bytes_to_copy > free_bytes) {
+    // end condition 3: out-of-space
+    return false;
+  }
+
+  // success!
+  return true;
+}
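
The free-space test above requires headroom proportional to both prediction confidence and expected PLAB waste; for instance, with G1ConfidencePercent=50 and TargetPLABWastePct=10 the factor is (100/50) * (110/100) = 2.2, so 2.2x the predicted copy volume must be free. A standalone sketch with hypothetical names (illustrative only, not part of the changeset):

bool copy_fits_sketch(double predicted_bytes_to_copy,
                      double free_bytes,
                      double confidence_percent,
                      double target_plab_waste_pct) {
  double safety_factor =
    (100.0 / confidence_percent) * (100.0 + target_plab_waste_pct) / 100.0;
  return safety_factor * predicted_bytes_to_copy <= free_bytes;
}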
+
+void G1Policy::record_new_heap_size(uint new_number_of_regions) {
+  // re-calculate the necessary reserve
+  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
+  // We use ceiling so that if reserve_regions_d is > 0.0 (but
+  // smaller than 1.0) we'll get 1.
+  _reserve_regions = (uint) ceil(reserve_regions_d);
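+  // For illustration, assuming the default G1ReservePercent of 10: a heap of
+  // 100 regions gives reserve_regions_d = 10.0, so _reserve_regions = 10, while
+  // a 5-region heap would still reserve ceil(0.5) = 1 region.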
+
+  _young_gen_sizer.heap_size_changed(new_number_of_regions);
+
+  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
+}
+
+uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
+  uint desired_min_length = 0;
+  if (adaptive_young_list_length()) {
+    if (_analytics->num_alloc_rate_ms() > 3) {
+      double now_sec = os::elapsedTime();
+      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
+      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
+      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
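+      // For illustration: a predicted allocation rate of 0.05 regions/ms and
+      // 200 ms until the MMU allows the next GC gives ceil(0.05 * 200) = 10
+      // regions as the desired minimum eden length.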
+    } else {
+      // otherwise we don't have enough info to make the prediction
+    }
+  }
+  desired_min_length += base_min_length;
+  // make sure we don't go below any user-defined minimum bound
+  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
+}
+
+uint G1Policy::calculate_young_list_desired_max_length() const {
+  // Here, we might want to also take into account any additional
+  // constraints (i.e., user-defined minimum bound). Currently, we
+  // effectively don't set this bound.
+  return _young_gen_sizer.max_desired_young_length();
+}
+
+uint G1Policy::update_young_list_max_and_target_length() {
+  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
+}
+
+uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
+  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
+  update_max_gc_locker_expansion();
+  return unbounded_target_length;
+}
+
+uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
+  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
+  _young_list_target_length = young_lengths.first;
+  return young_lengths.second;
+}
+
+G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
+  YoungTargetLengths result;
+
+  // Calculate the absolute and desired min bounds first.
+
+  // This is how many young regions we already have (currently: the survivors).
+  const uint base_min_length = _g1->young_list()->survivor_length();
+  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
+  // This is the absolute minimum young length. Ensure that we
+  // will at least have one eden region available for allocation.
+  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
+  // If we shrank the young list target it should not shrink below the current size.
+  desired_min_length = MAX2(desired_min_length, absolute_min_length);
+  // Calculate the absolute and desired max bounds.
+
+  uint desired_max_length = calculate_young_list_desired_max_length();
+
+  uint young_list_target_length = 0;
+  if (adaptive_young_list_length()) {
+    if (collector_state()->gcs_are_young()) {
+      young_list_target_length =
+                        calculate_young_list_target_length(rs_lengths,
+                                                           base_min_length,
+                                                           desired_min_length,
+                                                           desired_max_length);
+    } else {
+      // Don't calculate anything and let the code below bound it to
+      // the desired_min_length, i.e., do the next GC as soon as
+      // possible to maximize how many old regions we can add to it.
+    }
+  } else {
+    // The user asked for a fixed young gen so we'll fix the young gen
+    // whether the next GC is young or mixed.
+    young_list_target_length = _young_list_fixed_length;
+  }
+
+  result.second = young_list_target_length;
+
+  // We will try our best not to "eat" into the reserve.
+  uint absolute_max_length = 0;
+  if (_free_regions_at_end_of_collection > _reserve_regions) {
+    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
+  }
+  if (desired_max_length > absolute_max_length) {
+    desired_max_length = absolute_max_length;
+  }
+
+  // Make sure we don't go over the desired max length, nor under the
+  // desired min length. In case they clash, desired_min_length wins
+  // which is why that test is second.
+  if (young_list_target_length > desired_max_length) {
+    young_list_target_length = desired_max_length;
+  }
+  if (young_list_target_length < desired_min_length) {
+    young_list_target_length = desired_min_length;
+  }
+
+  assert(young_list_target_length > base_min_length,
+         "we should be able to allocate at least one eden region");
+  assert(young_list_target_length >= absolute_min_length, "post-condition");
+
+  result.first = young_list_target_length;
+  return result;
+}
+
+uint
+G1Policy::calculate_young_list_target_length(size_t rs_lengths,
+                                             uint base_min_length,
+                                             uint desired_min_length,
+                                             uint desired_max_length) const {
+  assert(adaptive_young_list_length(), "pre-condition");
+  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
+
+  // In case some edge-condition makes the desired max length too small...
+  if (desired_max_length <= desired_min_length) {
+    return desired_min_length;
+  }
+
+  // We'll adjust min_young_length and max_young_length not to include
+  // the already allocated young regions (i.e., so they reflect the
+  // min and max eden regions we'll allocate). The base_min_length
+  // will be reflected in the predictions by the
+  // survivor_regions_evac_time prediction.
+  assert(desired_min_length > base_min_length, "invariant");
+  uint min_young_length = desired_min_length - base_min_length;
+  assert(desired_max_length > base_min_length, "invariant");
+  uint max_young_length = desired_max_length - base_min_length;
+
+  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
+  size_t pending_cards = _analytics->predict_pending_cards();
+  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
+  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
+  double base_time_ms =
+    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
+    survivor_regions_evac_time;
+  uint available_free_regions = _free_regions_at_end_of_collection;
+  uint base_free_regions = 0;
+  if (available_free_regions > _reserve_regions) {
+    base_free_regions = available_free_regions - _reserve_regions;
+  }
+
+  // Here, we will make sure that the shortest young length that
+  // makes sense fits within the target pause time.
+
+  if (predict_will_fit(min_young_length, base_time_ms,
+                       base_free_regions, target_pause_time_ms)) {
+    // The shortest young length will fit into the target pause time;
+    // we'll now check whether the absolute maximum number of young
+    // regions will fit in the target pause time. If not, we'll do
+    // a binary search between min_young_length and max_young_length.
+    if (predict_will_fit(max_young_length, base_time_ms,
+                         base_free_regions, target_pause_time_ms)) {
+      // The maximum young length will fit into the target pause time.
+      // We are done so set min young length to the maximum length (as
+      // the result is assumed to be returned in min_young_length).
+      min_young_length = max_young_length;
+    } else {
+      // The maximum possible number of young regions will not fit within
+      // the target pause time so we'll search for the optimal
+      // length. The loop invariants are:
+      //
+      // min_young_length < max_young_length
+      // min_young_length is known to fit into the target pause time
+      // max_young_length is known not to fit into the target pause time
+      //
+      // Going into the loop we know the above hold as we've just
+      // checked them. Every time around the loop we check whether
+      // the middle value between min_young_length and
+      // max_young_length fits into the target pause time. If it
+      // does, it becomes the new min. If it doesn't, it becomes
+      // the new max. This way we maintain the loop invariants.
+
+      assert(min_young_length < max_young_length, "invariant");
+      uint diff = (max_young_length - min_young_length) / 2;
+      while (diff > 0) {
+        uint young_length = min_young_length + diff;
+        if (predict_will_fit(young_length, base_time_ms,
+                             base_free_regions, target_pause_time_ms)) {
+          min_young_length = young_length;
+        } else {
+          max_young_length = young_length;
+        }
+        assert(min_young_length <  max_young_length, "invariant");
+        diff = (max_young_length - min_young_length) / 2;
+      }
+      // The result is min_young_length which, according to the
+      // loop invariants, should fit within the target pause time.
+
+      // These are the post-conditions of the binary search above:
+      assert(min_young_length < max_young_length,
+             "otherwise we should have discovered that max_young_length "
+             "fits into the pause target and not done the binary search");
+      assert(predict_will_fit(min_young_length, base_time_ms,
+                              base_free_regions, target_pause_time_ms),
+             "min_young_length, the result of the binary search, should "
+             "fit into the pause target");
+      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
+                               base_free_regions, target_pause_time_ms),
+             "min_young_length, the result of the binary search, should be "
+             "optimal, so no larger length should fit into the pause target");
+    }
+  } else {
+    // Even the minimum length doesn't fit into the pause time
+    // target; return it as the result nevertheless.
+  }
+  return base_min_length + min_young_length;
+}
+
+double G1Policy::predict_survivor_regions_evac_time() const {
+  double survivor_regions_evac_time = 0.0;
+  for (HeapRegion * r = _g1->young_list()->first_survivor_region();
+       r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
+       r = r->get_next_young_region()) {
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
+  }
+  return survivor_regions_evac_time;
+}
+
+void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
+  guarantee( adaptive_young_list_length(), "should not call this otherwise" );
+
+  if (rs_lengths > _rs_lengths_prediction) {
+    // add 10% to avoid having to recalculate often
+    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
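+    // For illustration: an observed rs_lengths of 10000 is padded to a
+    // prediction of 10000 * 1100 / 1000 = 11000 entries.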
+    update_rs_lengths_prediction(rs_lengths_prediction);
+
+    update_young_list_max_and_target_length(rs_lengths_prediction);
+  }
+}
+
+void G1Policy::update_rs_lengths_prediction() {
+  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
+}
+
+void G1Policy::update_rs_lengths_prediction(size_t prediction) {
+  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
+    _rs_lengths_prediction = prediction;
+  }
+}
+
+#ifndef PRODUCT
+bool G1Policy::verify_young_ages() {
+  HeapRegion* head = _g1->young_list()->first_region();
+  return
+    verify_young_ages(head, _short_lived_surv_rate_group);
+  // also call verify_young_ages on any additional surv rate groups
+}
+
+bool G1Policy::verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group) {
+  guarantee( surv_rate_group != NULL, "pre-condition" );
+
+  const char* name = surv_rate_group->name();
+  bool ret = true;
+  int prev_age = -1;
+
+  for (HeapRegion* curr = head;
+       curr != NULL;
+       curr = curr->get_next_young_region()) {
+    SurvRateGroup* group = curr->surv_rate_group();
+    if (group == NULL && !curr->is_survivor()) {
+      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
+      ret = false;
+    }
+
+    if (surv_rate_group == group) {
+      int age = curr->age_in_surv_rate_group();
+
+      if (age < 0) {
+        log_error(gc, verify)("## %s: encountered negative age", name);
+        ret = false;
+      }
+
+      if (age <= prev_age) {
+        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
+        ret = false;
+      }
+      prev_age = age;
+    }
+  }
+
+  return ret;
+}
+#endif // PRODUCT
+
+void G1Policy::record_full_collection_start() {
+  _full_collection_start_sec = os::elapsedTime();
+  // Release the future to-space so that it is available for compaction into.
+  collector_state()->set_full_collection(true);
+}
+
+void G1Policy::record_full_collection_end() {
+  // Consider this like a collection pause for the purposes of tracking
+  // allocation since the last pause.
+  double end_sec = os::elapsedTime();
+  double full_gc_time_sec = end_sec - _full_collection_start_sec;
+  double full_gc_time_ms = full_gc_time_sec * 1000.0;
+
+  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
+
+  collector_state()->set_full_collection(false);
+
+  // "Nuke" the heuristics that control the young/mixed GC
+  // transitions and make sure we start with young GCs after the Full GC.
+  collector_state()->set_gcs_are_young(true);
+  collector_state()->set_last_young_gc(false);
+  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
+  collector_state()->set_during_initial_mark_pause(false);
+  collector_state()->set_in_marking_window(false);
+  collector_state()->set_in_marking_window_im(false);
+
+  _short_lived_surv_rate_group->start_adding_regions();
+  // also call this on any additional surv rate groups
+
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
+  // Reset survivors SurvRateGroup.
+  _survivor_surv_rate_group->reset();
+  update_young_list_max_and_target_length();
+  update_rs_lengths_prediction();
+  cset_chooser()->clear();
+
+  _bytes_allocated_in_old_since_last_gc = 0;
+
+  record_pause(FullGC, _full_collection_start_sec, end_sec);
+}
+
+void G1Policy::record_collection_pause_start(double start_time_sec) {
+  // We only need to do this here as the policy will only be applied
+  // to the GC we're about to start, so there is no point in calculating this
+  // every time we calculate / recalculate the target young length.
+  update_survivors_policy();
+
+  assert(_g1->used() == _g1->recalculate_used(),
+         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
+         _g1->used(), _g1->recalculate_used());
+
+  phase_times()->record_cur_collection_start_sec(start_time_sec);
+  _pending_cards = _g1->pending_card_num();
+
+  _collection_set->reset_bytes_used_before();
+  _bytes_copied_during_gc = 0;
+
+  collector_state()->set_last_gc_was_young(false);
+
+  // do that for any other surv rate groups
+  _short_lived_surv_rate_group->stop_adding_regions();
+  _survivors_age_table.clear();
+
+  assert( verify_young_ages(), "region age verification" );
+}
+
+void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
+  collector_state()->set_during_marking(true);
+  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  collector_state()->set_during_initial_mark_pause(false);
+}
+
+void G1Policy::record_concurrent_mark_remark_start() {
+  _mark_remark_start_sec = os::elapsedTime();
+  collector_state()->set_during_marking(false);
+}
+
+void G1Policy::record_concurrent_mark_remark_end() {
+  double end_time_sec = os::elapsedTime();
+  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
+  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
+  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
+
+  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
+}
+
+void G1Policy::record_concurrent_mark_cleanup_start() {
+  _mark_cleanup_start_sec = os::elapsedTime();
+}
+
+void G1Policy::record_concurrent_mark_cleanup_completed() {
+  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
+                                                              "skip last young-only gc");
+  collector_state()->set_last_young_gc(should_continue_with_reclaim);
+  // We skip the marking phase.
+  if (!should_continue_with_reclaim) {
+    abort_time_to_mixed_tracking();
+  }
+  collector_state()->set_in_marking_window(false);
+}
+
+double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
+  return phase_times()->average_time_ms(phase);
+}
+
+double G1Policy::young_other_time_ms() const {
+  return phase_times()->young_cset_choice_time_ms() +
+         phase_times()->young_free_cset_time_ms();
+}
+
+double G1Policy::non_young_other_time_ms() const {
+  return phase_times()->non_young_cset_choice_time_ms() +
+         phase_times()->non_young_free_cset_time_ms();
+}
+
+double G1Policy::other_time_ms(double pause_time_ms) const {
+  return pause_time_ms -
+         average_time_ms(G1GCPhaseTimes::UpdateRS) -
+         average_time_ms(G1GCPhaseTimes::ScanRS) -
+         average_time_ms(G1GCPhaseTimes::ObjCopy) -
+         average_time_ms(G1GCPhaseTimes::Termination);
+}
+
+double G1Policy::constant_other_time_ms(double pause_time_ms) const {
+  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
+}
+
+CollectionSetChooser* G1Policy::cset_chooser() const {
+  return _collection_set->cset_chooser();
+}
+
+bool G1Policy::about_to_start_mixed_phase() const {
+  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
+}
+
+bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+  if (about_to_start_mixed_phase()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
+
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
+
+  bool result = false;
+  if (marking_request_bytes > marking_initiating_used_threshold) {
+    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
+                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
+                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
+  }
+
+  return result;
+}
+
+// Anything below that is considered to be zero
+#define MIN_TIMER_GRANULARITY 0.0000001
+
+void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
+  double end_time_sec = os::elapsedTime();
+
+  size_t cur_used_bytes = _g1->used();
+  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
+  bool last_pause_included_initial_mark = false;
+  bool update_stats = !_g1->evacuation_failed();
+
+  NOT_PRODUCT(_short_lived_surv_rate_group->print());
+
+  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
+
+  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
+  if (last_pause_included_initial_mark) {
+    record_concurrent_mark_init_end(0.0);
+  } else {
+    maybe_start_marking();
+  }
+
+  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
+  if (app_time_ms < MIN_TIMER_GRANULARITY) {
+    // This usually happens due to the timer not having the required
+    // granularity. Some Linuxes are the usual culprits.
+    // We'll just set it to something (arbitrarily) small.
+    app_time_ms = 1.0;
+  }
+
+  if (update_stats) {
+    // We maintain the invariant that all objects allocated by mutator
+    // threads will be allocated out of eden regions. So, we can use
+    // the number of eden regions allocated since the previous GC to
+    // calculate the application's allocation rate. The only exception
+    // to that is humongous objects that are allocated separately. But
+    // given that humongous object allocations do not really affect
+    // either the pause's duration or when the next pause will take
+    // place, we can safely ignore them here.
+    uint regions_allocated = _collection_set->eden_region_length();
+    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
+    _analytics->report_alloc_rate_ms(alloc_rate_ms);
+
+    double interval_ms =
+      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
+    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
+    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
+  }
+
+  bool new_in_marking_window = collector_state()->in_marking_window();
+  bool new_in_marking_window_im = false;
+  if (last_pause_included_initial_mark) {
+    new_in_marking_window = true;
+    new_in_marking_window_im = true;
+  }
+
+  if (collector_state()->last_young_gc()) {
+    // This is supposed to be the "last young GC" before we start
+    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
+    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
+
+    if (next_gc_should_be_mixed("start mixed GCs",
+                                "do not start mixed GCs")) {
+      collector_state()->set_gcs_are_young(false);
+    } else {
+      // We aborted the mixed GC phase early.
+      abort_time_to_mixed_tracking();
+    }
+
+    collector_state()->set_last_young_gc(false);
+  }
+
+  if (!collector_state()->last_gc_was_young()) {
+    // This is a mixed GC. Here we decide whether to continue doing
+    // mixed GCs or not.
+    if (!next_gc_should_be_mixed("continue mixed GCs",
+                                 "do not continue mixed GCs")) {
+      collector_state()->set_gcs_are_young(true);
+
+      maybe_start_marking();
+    }
+  }
+
+  _short_lived_surv_rate_group->start_adding_regions();
+  // Do that for any other surv rate groups
+
+  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
+
+  if (update_stats) {
+    double cost_per_card_ms = 0.0;
+    if (_pending_cards > 0) {
+      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
+      _analytics->report_cost_per_card_ms(cost_per_card_ms);
+    }
+    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
+
+    double cost_per_entry_ms = 0.0;
+    if (cards_scanned > 10) {
+      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
+      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
+    }
+
+    if (_max_rs_lengths > 0) {
+      double cards_per_entry_ratio =
+        (double) cards_scanned / (double) _max_rs_lengths;
+      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
+    }
+
+    // This is defensive. For a while _max_rs_lengths could get
+    // smaller than _recorded_rs_lengths which was causing
+    // rs_length_diff to get very large and mess up the RSet length
+    // predictions. The reason was unsafe concurrent updates to the
+    // _inc_cset_recorded_rs_lengths field which the code below guards
+    // against (see CR 7118202). This bug has now been fixed (see CR
+    // 7119027). However, I'm still worried that
+    // _inc_cset_recorded_rs_lengths might still end up somewhat
+    // inaccurate. The concurrent refinement thread calculates an
+    // RSet's length concurrently with other CR threads updating it
+    // which might cause it to calculate the length incorrectly (if,
+    // say, it's in mid-coarsening). So I'll leave in the defensive
+    // conditional below just in case.
+    size_t rs_length_diff = 0;
+    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
+    if (_max_rs_lengths > recorded_rs_lengths) {
+      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
+    }
+    _analytics->report_rs_length_diff((double) rs_length_diff);
+
+    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
+    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
+    double cost_per_byte_ms = 0.0;
+
+    if (copied_bytes > 0) {
+      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
+      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
+    }
+
+    if (_collection_set->young_region_length() > 0) {
+      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
+                                                        _collection_set->young_region_length());
+    }
+
+    if (_collection_set->old_region_length() > 0) {
+      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
+                                                            _collection_set->old_region_length());
+    }
+
+    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
+
+    _analytics->report_pending_cards((double) _pending_cards);
+    _analytics->report_rs_lengths((double) _max_rs_lengths);
+  }
+
+  collector_state()->set_in_marking_window(new_in_marking_window);
+  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
+  // IHOP control wants to know the expected young gen length if it were not
+  // restrained by the heap reserve. Using the actual length would make the
+  // prediction too small and thus limit the young gen every time we get to the
+  // predicted target occupancy.
+  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
+  update_rs_lengths_prediction();
+
+  update_ihop_prediction(app_time_ms / 1000.0,
+                         _bytes_allocated_in_old_since_last_gc,
+                         last_unrestrained_young_length * HeapRegion::GrainBytes);
+  _bytes_allocated_in_old_since_last_gc = 0;
+
+  _ihop_control->send_trace_event(_g1->gc_tracer_stw());
+
+  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
+
+  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
+    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
+                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
+                                update_rs_time_goal_ms, scan_hcc_time_ms);
+
+    update_rs_time_goal_ms = 0;
+  } else {
+    update_rs_time_goal_ms -= scan_hcc_time_ms;
+  }
+  _g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+                                      phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+                                      update_rs_time_goal_ms);
+
+  cset_chooser()->verify();
+}
+
+G1IHOPControl* G1Policy::create_ihop_control() const {
+  if (G1UseAdaptiveIHOP) {
+    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
+                                     &_predictor,
+                                     G1ReservePercent,
+                                     G1HeapWastePercent);
+  } else {
+    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
+  }
+}
+
+void G1Policy::update_ihop_prediction(double mutator_time_s,
+                                      size_t mutator_alloc_bytes,
+                                      size_t young_gen_size) {
+  // Always try to update IHOP prediction. Even evacuation failures give information
+  // about e.g. whether to start IHOP earlier next time.
+
+  // Avoid using really small application times that might create samples with
+  // very high or very low values. They may be caused by e.g. back-to-back gcs.
+  double const min_valid_time = 1e-6;
+
+  bool report = false;
+
+  double marking_to_mixed_time = -1.0;
+  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
+    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
+    assert(marking_to_mixed_time > 0.0,
+           "Initial mark to mixed time must be larger than zero but is %.3f",
+           marking_to_mixed_time);
+    if (marking_to_mixed_time > min_valid_time) {
+      _ihop_control->update_marking_length(marking_to_mixed_time);
+      report = true;
+    }
+  }
+
+  // As an approximation for the young gc promotion rates during marking we use
+  // the rates from all young gcs, not just those observed while marking. In many
+  // applications there are only a few (if any) young gcs during marking, which
+  // would make a prediction based on those alone useless; using all of them
+  // increases the accuracy of the prediction.
+  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
+    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
+    report = true;
+  }
+
+  if (report) {
+    report_ihop_statistics();
+  }
+}
+
+void G1Policy::report_ihop_statistics() {
+  _ihop_control->print();
+}
+
+void G1Policy::print_phases() {
+  phase_times()->print();
+}
+
+double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
+  TruncatedSeq* seq = surv_rate_group->get_seq(age);
+  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
+  double pred = _predictor.get_new_prediction(seq);
+  if (pred > 1.0) {
+    pred = 1.0;
+  }
+  return pred;
+}
+
+double G1Policy::predict_yg_surv_rate(int age) const {
+  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
+}
+
+double G1Policy::accum_yg_surv_rate_pred(int age) const {
+  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
+}
+
+double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
+                                              size_t scanned_cards) const {
+  return
+    _analytics->predict_rs_update_time_ms(pending_cards) +
+    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+    _analytics->predict_constant_other_time_ms();
+}
+
+double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
+  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
+  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
+  return predict_base_elapsed_time_ms(pending_cards, card_num);
+}
+
+size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
+  size_t bytes_to_copy;
+  if (hr->is_marked()) {
+    bytes_to_copy = hr->max_live_bytes();
+  } else {
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
+    int age = hr->age_in_surv_rate_group();
+    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
+    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
+  }
+  return bytes_to_copy;
+}
+
+double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
+                                                bool for_young_gc) const {
+  size_t rs_length = hr->rem_set()->occupied();
+  // Predicting the number of cards is based on which type of GC
+  // we're predicting for.
+  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
+  size_t bytes_to_copy = predict_bytes_to_copy(hr);
+
+  double region_elapsed_time_ms =
+    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
+    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
+
+  // The prediction of the "other" time for this region is based
+  // upon the region type and NOT the GC type.
+  if (hr->is_young()) {
+    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
+  } else {
+    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
+  }
+  return region_elapsed_time_ms;
+}
+
+
+void G1Policy::print_yg_surv_rate_info() const {
+#ifndef PRODUCT
+  _short_lived_surv_rate_group->print_surv_rate_summary();
+  // add this call for any other surv rate groups
+#endif // PRODUCT
+}
+
+bool G1Policy::is_young_list_full() const {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1Policy::can_expand_young_list() const {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
+bool G1Policy::adaptive_young_list_length() const {
+  return _young_gen_sizer.adaptive_young_list_length();
+}
+
+void G1Policy::update_max_gc_locker_expansion() {
+  uint expansion_region_num = 0;
+  if (GCLockerEdenExpansionPercent > 0) {
+    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+    double expansion_region_num_d = perc * (double) _young_list_target_length;
+    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+    // less than 1.0) we'll get 1.
+    expansion_region_num = (uint) ceil(expansion_region_num_d);
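+    // For illustration, assuming the default GCLockerEdenExpansionPercent of 5:
+    // a target length of 30 regions allows ceil(0.05 * 30) = ceil(1.5) = 2
+    // extra regions while the GC locker is active.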
+  } else {
+    assert(expansion_region_num == 0, "sanity");
+  }
+  _young_list_max_length = _young_list_target_length + expansion_region_num;
+  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
+
+// Calculates survivor space parameters.
+void G1Policy::update_survivors_policy() {
+  double max_survivor_regions_d =
+                 (double) _young_list_target_length / (double) SurvivorRatio;
+  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
+  // smaller than 1.0) we'll get 1.
+  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
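+  // For illustration, assuming the default SurvivorRatio of 8: a young list
+  // target length of 50 regions gives ceil(50 / 8.0) = ceil(6.25) = 7 survivor
+  // regions at most.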
+
+  GCPolicyCounters* counters = _g1->collector_policy()->counters();
+  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
+        HeapRegion::GrainWords * _max_survivor_regions, counters);
+}
+
+bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
+  // We actually check whether we are marking here and not if we are in a
+  // reclamation phase. This means that we will schedule a concurrent mark
+  // even while we are still in the process of reclaiming memory.
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
+    collector_state()->set_initiate_conc_mark_if_possible(true);
+    return true;
+  } else {
+    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
+    return false;
+  }
+}
+
+void G1Policy::initiate_conc_mark() {
+  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+}
+
+void G1Policy::decide_on_conc_mark_initiation() {
+  // We are about to decide on whether this pause will be an
+  // initial-mark pause.
+
+  // First, collector_state()->during_initial_mark_pause() should not already be set. We
+  // will set it here if we have to. However, it should be cleared by
+  // the end of the pause (it's only set for the duration of an
+  // initial-mark pause).
+  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
+
+  if (collector_state()->initiate_conc_mark_if_possible()) {
+    // We had noticed on a previous pause that the heap occupancy has
+    // gone over the initiating threshold and we should start a
+    // concurrent marking cycle. So we might initiate one.
+
+    if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
+      // Initiate a new initial mark if there is no marking or reclamation going on.
+      initiate_conc_mark();
+      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
+    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+      // Initiate a user requested initial mark. An initial mark must be a young-only
+      // GC, so the collector state must be updated to reflect this.
+      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_last_young_gc(false);
+
+      abort_time_to_mixed_tracking();
+      initiate_conc_mark();
+      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
+    } else {
+      // The concurrent marking thread is still finishing up the
+      // previous cycle. If we start one right now the two cycles
+      // overlap. In particular, the concurrent marking thread might
+      // be in the process of clearing the next marking bitmap (which
+      // we will use for the next cycle if we start one). Starting a
+      // cycle now will be bad given that parts of the marking
+      // information might get cleared by the marking thread. And we
+      // cannot wait for the marking thread to finish the cycle as it
+      // periodically yields while clearing the next marking bitmap
+      // and, if it's in a yield point, it's waiting for us to
+      // finish. So, at this point we will not start a cycle and we'll
+      // let the concurrent marking thread complete the last one.
+      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
+    }
+  }
+}
+
+void G1Policy::record_concurrent_mark_cleanup_end() {
+  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
+
+  double end_sec = os::elapsedTime();
+  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
+  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
+  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
+
+  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
+}
+
+double G1Policy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
+  // Returns the given amount of reclaimable bytes (that represents
+  // the amount of reclaimable space still to be collected) as a
+  // percentage of the current heap capacity.
+  size_t capacity_bytes = _g1->capacity();
+  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+}
+
+void G1Policy::maybe_start_marking() {
+  if (need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    collector_state()->set_initiate_conc_mark_if_possible(true);
+  }
+}
+
+G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
+  assert(!collector_state()->full_collection(), "must be");
+  if (collector_state()->during_initial_mark_pause()) {
+    assert(collector_state()->last_gc_was_young(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return InitialMarkGC;
+  } else if (collector_state()->last_young_gc()) {
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(collector_state()->last_gc_was_young(), "must be");
+    return LastYoungGC;
+  } else if (!collector_state()->last_gc_was_young()) {
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return MixedGC;
+  } else {
+    assert(collector_state()->last_gc_was_young(), "must be");
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return YoungOnlyGC;
+  }
+}
+
+void G1Policy::record_pause(PauseKind kind, double start, double end) {
+  // Manage the MMU tracker. For some reason it ignores Full GCs.
+  if (kind != FullGC) {
+    _mmu_tracker->add_pause(start, end);
+  }
+  // Manage the mutator time tracking from initial mark to first mixed gc.
+  switch (kind) {
+    case FullGC:
+      abort_time_to_mixed_tracking();
+      break;
+    case Cleanup:
+    case Remark:
+    case YoungOnlyGC:
+    case LastYoungGC:
+      _initial_mark_to_mixed.add_pause(end - start);
+      break;
+    case InitialMarkGC:
+      _initial_mark_to_mixed.record_initial_mark_end(end);
+      break;
+    case MixedGC:
+      _initial_mark_to_mixed.record_mixed_gc_start(start);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void G1Policy::abort_time_to_mixed_tracking() {
+  _initial_mark_to_mixed.reset();
+}
+
+bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
+                                       const char* false_action_str) const {
+  if (cset_chooser()->is_empty()) {
+    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
+    return false;
+  }
+
+  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
+  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
+  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
+  double threshold = (double) G1HeapWastePercent;
+  if (reclaimable_perc <= threshold) {
+    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
+    return false;
+  }
+  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
+  return true;
+}
+
+uint G1Policy::calc_min_old_cset_length() const {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) cset_chooser()->length();
+  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
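+  // For illustration, assuming the default G1MixedGCCountTarget of 8: 100
+  // candidate regions give ceil(100 / 8) = 13 old regions per mixed GC at
+  // a minimum.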
+  return (uint) result;
+}
+
+uint G1Policy::calc_max_old_cset_length() const {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->num_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
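+  // For illustration, assuming the default G1OldCSetRegionThresholdPercent of
+  // 10: a 1000-region heap bounds the old part of the CSet at 100 regions per
+  // mixed GC.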
+  return (uint) result;
+}
+
+void G1Policy::finalize_collection_set(double target_pause_time_ms) {
+  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
+  _collection_set->finalize_old_part(time_remaining_ms);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Policy.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1POLICY_HPP
+#define SHARE_VM_GC_G1_G1POLICY_HPP
+
+#include "gc/g1/g1CollectorState.hpp"
+#include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1InCSetState.hpp"
+#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
+#include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Predictions.hpp"
+#include "gc/g1/g1YoungGenSizer.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "utilities/pair.hpp"
+
+// A G1Policy makes policy decisions that determine the
+// characteristics of the collector.  Examples include:
+//   * choice of collection set.
+//   * when to collect.
+
+class HeapRegion;
+class G1CollectionSet;
+class CollectionSetChooser;
+class G1IHOPControl;
+class G1Analytics;
+class G1YoungGenSizer;
+
+class G1Policy: public CHeapObj<mtGC> {
+ private:
+  G1IHOPControl* _ihop_control;
+
+  G1IHOPControl* create_ihop_control() const;
+  // Update the IHOP control with necessary statistics.
+  void update_ihop_prediction(double mutator_time_s,
+                              size_t mutator_alloc_bytes,
+                              size_t young_gen_size);
+  void report_ihop_statistics();
+
+  G1Predictions _predictor;
+  G1Analytics* _analytics;
+  G1MMUTracker* _mmu_tracker;
+
+  double _full_collection_start_sec;
+
+  uint _young_list_target_length;
+  uint _young_list_fixed_length;
+
+  // The max number of regions we can extend the eden by while the GC
+  // locker is active. This should be >= _young_list_target_length.
+  uint _young_list_max_length;
+
+  SurvRateGroup* _short_lived_surv_rate_group;
+  SurvRateGroup* _survivor_surv_rate_group;
+
+  double _reserve_factor;
+  uint   _reserve_regions;
+
+  G1YoungGenSizer _young_gen_sizer;
+
+  uint _free_regions_at_end_of_collection;
+
+  size_t _max_rs_lengths;
+
+  size_t _rs_lengths_prediction;
+
+#ifndef PRODUCT
+  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
+#endif // PRODUCT
+
+  double _pause_time_target_ms;
+
+  size_t _pending_cards;
+
+  // The number of bytes allocated in the old gen during the last mutator
+  // phase and the following young GC phase.
+  size_t _bytes_allocated_in_old_since_last_gc;
+
+  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
+public:
+  const G1Predictions& predictor() const { return _predictor; }
+  const G1Analytics* analytics()   const { return const_cast<const G1Analytics*>(_analytics); }
+
+  // Add the given number of bytes to the total number of allocated bytes in the old gen.
+  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
+
+  // Accessors
+
+  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
+    hr->set_eden();
+    hr->install_surv_rate_group(_short_lived_surv_rate_group);
+    hr->set_young_index_in_cset(young_index_in_cset);
+  }
+
+  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
+    assert(hr->is_survivor(), "pre-condition");
+    hr->install_surv_rate_group(_survivor_surv_rate_group);
+    hr->set_young_index_in_cset(young_index_in_cset);
+  }
+
+#ifndef PRODUCT
+  bool verify_young_ages();
+#endif // PRODUCT
+
+  void record_max_rs_lengths(size_t rs_lengths) {
+    _max_rs_lengths = rs_lengths;
+  }
+
+
+  double predict_base_elapsed_time_ms(size_t pending_cards) const;
+  double predict_base_elapsed_time_ms(size_t pending_cards,
+                                      size_t scanned_cards) const;
+  size_t predict_bytes_to_copy(HeapRegion* hr) const;
+  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
+
+  double predict_survivor_regions_evac_time() const;
+
+  bool should_update_surv_rate_group_predictors() {
+    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
+  }
+
+  void cset_regions_freed() {
+    bool update = should_update_surv_rate_group_predictors();
+
+    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
+    _survivor_surv_rate_group->all_surviving_words_recorded(update);
+  }
+
+  G1MMUTracker* mmu_tracker() {
+    return _mmu_tracker;
+  }
+
+  const G1MMUTracker* mmu_tracker() const {
+    return _mmu_tracker;
+  }
+
+  double max_pause_time_ms() const {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
+  // Returns an estimate of the survival rate of the region at yg-age
+  // "yg_age".
+  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
+
+  double predict_yg_surv_rate(int age) const;
+
+  double accum_yg_surv_rate_pred(int age) const;
+
+protected:
+  G1CollectionSet* _collection_set;
+  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
+  virtual double other_time_ms(double pause_time_ms) const;
+
+  double young_other_time_ms() const;
+  double non_young_other_time_ms() const;
+  double constant_other_time_ms(double pause_time_ms) const;
+
+  CollectionSetChooser* cset_chooser() const;
+private:
+
+  // The number of bytes copied during the GC.
+  size_t _bytes_copied_during_gc;
+
+  // Stash a pointer to the g1 heap.
+  G1CollectedHeap* _g1;
+
+  G1GCPhaseTimes* _phase_times;
+
+  // This set of variables tracks the collector efficiency, in order to
+  // determine whether we should initiate a new marking.
+  double _mark_remark_start_sec;
+  double _mark_cleanup_start_sec;
+
+  // Updates the internal young list maximum and target lengths. Returns the
+  // unbounded young list target length.
+  uint update_young_list_max_and_target_length();
+  uint update_young_list_max_and_target_length(size_t rs_lengths);
+
+  // Update the young list target length either by setting it to the
+  // desired fixed value or by calculating it using G1's pause
+  // prediction model. If no rs_lengths parameter is passed, predict
+  // the RS lengths using the prediction model, otherwise use the
+  // given rs_lengths as the prediction.
+  // Returns the unbounded young list target length.
+  uint update_young_list_target_length(size_t rs_lengths);
+
+  // Calculate and return the minimum desired young list target
+  // length. This is the minimum desired young list length according
+  // to the user's inputs.
+  uint calculate_young_list_desired_min_length(uint base_min_length) const;
+
+  // Calculate and return the maximum desired young list target
+  // length. This is the maximum desired young list length according
+  // to the user's inputs.
+  uint calculate_young_list_desired_max_length() const;
+
+  // Calculate and return the maximum young list target length that
+  // can fit into the pause time goal. The parameters are: rs_lengths
+  // represent the prediction of how large the young RSet lengths will
+  // be, base_min_length is the already existing number of regions in
+  // the young list, min_length and max_length are the desired min and
+  // max young list length according to the user's inputs.
+  uint calculate_young_list_target_length(size_t rs_lengths,
+                                          uint base_min_length,
+                                          uint desired_min_length,
+                                          uint desired_max_length) const;
+
+  // Result of the young_list_target_lengths() method, containing both the
+  // bounded as well as the unbounded young list target lengths in this order.
+  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
+  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
+
+  void update_rs_lengths_prediction();
+  void update_rs_lengths_prediction(size_t prediction);
+
+  // Check whether a given young length (young_length) fits into the
+  // given target pause time and whether the prediction for the amount
+  // of objects to be copied for the given length will fit into the
+  // given free space (expressed by base_free_regions).  It is used by
+  // calculate_young_list_target_length().
+  bool predict_will_fit(uint young_length, double base_time_ms,
+                        uint base_free_regions, double target_pause_time_ms) const;
+
+public:
+  size_t pending_cards() const { return _pending_cards; }
+
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  uint calc_min_old_cset_length() const;
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  uint calc_max_old_cset_length() const;
+
+  // Returns the given amount of uncollected reclaimable space
+  // as a percentage of the current heap capacity.
+  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
+
+private:
+  // Sets up marking if proper conditions are met.
+  void maybe_start_marking();
+
+  // The kind of STW pause.
+  enum PauseKind {
+    FullGC,
+    YoungOnlyGC,
+    MixedGC,
+    LastYoungGC,
+    InitialMarkGC,
+    Cleanup,
+    Remark
+  };
+
+  // Calculate PauseKind from internal state.
+  PauseKind young_gc_pause_kind() const;
+  // Record the given STW pause with the given start and end times (in s).
+  void record_pause(PauseKind kind, double start, double end);
+  // Indicate that we aborted marking before doing any mixed GCs.
+  void abort_time_to_mixed_tracking();
+public:
+
+  G1Policy();
+
+  virtual ~G1Policy();
+
+  G1CollectorState* collector_state() const;
+
+  G1GCPhaseTimes* phase_times() const { return _phase_times; }
+
+  // Check the current value of the young list RSet lengths and
+  // compare it against the last prediction. If the current value is
+  // higher, recalculate the young list target length prediction.
+  void revise_young_list_target_length_if_necessary(size_t rs_lengths);
+
+  // This should be called after the heap is resized.
+  void record_new_heap_size(uint new_number_of_regions);
+
+  void init();
+
+  virtual void note_gc_start();
+
+  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
+
+  bool about_to_start_mixed_phase() const;
+
+  // Record the start and end of an evacuation pause.
+  void record_collection_pause_start(double start_time_sec);
+  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+
+  // Record the start and end of a full collection.
+  void record_full_collection_start();
+  void record_full_collection_end();
+
+  // Must currently be called while the world is stopped.
+  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
+
+  // Record start and end of remark.
+  void record_concurrent_mark_remark_start();
+  void record_concurrent_mark_remark_end();
+
+  // Record start, end, and completion of cleanup.
+  void record_concurrent_mark_cleanup_start();
+  void record_concurrent_mark_cleanup_end();
+  void record_concurrent_mark_cleanup_completed();
+
+  virtual void print_phases();
+
+  // Record how much space we copied during a GC. This is typically
+  // called when a GC alloc region is being retired.
+  void record_bytes_copied_during_gc(size_t bytes) {
+    _bytes_copied_during_gc += bytes;
+  }
+
+  // The amount of space we copied during a GC.
+  size_t bytes_copied_during_gc() const {
+    return _bytes_copied_during_gc;
+  }
+
+  // Determine whether there are candidate regions so that the
+  // next GC should be mixed. The two action strings are used
+  // in the ergo output when the method returns true or false.
+  bool next_gc_should_be_mixed(const char* true_action_str,
+                               const char* false_action_str) const;
+
+  virtual void finalize_collection_set(double target_pause_time_ms);
+private:
+  // Set the state to start a concurrent marking cycle and clear
+  // _initiate_conc_mark_if_possible because it has now been
+  // acted on.
+  void initiate_conc_mark();
+
+public:
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It's best if it
+  // is called during a safepoint when the test whether a cycle is in
+  // progress or not is stable.
+  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
+
+  // This is called at the very beginning of an evacuation pause (it
+  // has to be the first thing that the pause does). If
+  // initiate_conc_mark_if_possible() is true, and the concurrent
+  // marking thread has completed its work during the previous cycle,
+  // it will set during_initial_mark_pause() to true so that the pause does
+  // the initial-mark work and start a marking cycle.
+  void decide_on_conc_mark_initiation();
+
+  // Print stats on young survival ratio
+  void print_yg_surv_rate_info() const;
+
+  void finished_recalculating_age_indexes(bool is_survivors) {
+    if (is_survivors) {
+      _survivor_surv_rate_group->finished_recalculating_age_indexes();
+    } else {
+      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
+    }
+  }
+
+  size_t young_list_target_length() const { return _young_list_target_length; }
+
+  bool is_young_list_full() const;
+
+  bool can_expand_young_list() const;
+
+  uint young_list_max_length() const {
+    return _young_list_max_length;
+  }
+
+  bool adaptive_young_list_length() const;
+
+  virtual bool should_process_references() const {
+    return true;
+  }
+
+private:
+  //
+  // Survivor regions policy.
+  //
+
+  // Current tenuring threshold, set to 0 if the collector reaches the
+  // maximum number of survivor regions.
+  uint _tenuring_threshold;
+
+  // The limit on the number of regions allocated for survivors.
+  uint _max_survivor_regions;
+
+  AgeTable _survivors_age_table;
+
+public:
+  uint tenuring_threshold() const { return _tenuring_threshold; }
+
+  uint max_survivor_regions() {
+    return _max_survivor_regions;
+  }
+
+  void note_start_adding_survivor_regions() {
+    _survivor_surv_rate_group->start_adding_regions();
+  }
+
+  void note_stop_adding_survivor_regions() {
+    _survivor_surv_rate_group->stop_adding_regions();
+  }
+
+  void record_age_table(AgeTable* age_table) {
+    _survivors_age_table.merge(age_table);
+  }
+
+  void update_max_gc_locker_expansion();
+
+  // Calculates survivor space parameters.
+  void update_survivors_policy();
+};
+
+#endif // SHARE_VM_GC_G1_G1POLICY_HPP
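
The survivor-policy block near the end of G1Policy keeps a merged age table, a survivor-region limit and a tenuring threshold that update_survivors_policy() recomputes, with the threshold dropping to 0 once the survivor limit is reached. As a rough, stand-alone illustration of that kind of calculation (not the HotSpot implementation: SimpleAgeTable, compute_threshold and the capacities are made up for the example), a cumulative-size cutoff over the age table looks like this:

#include <cstddef>
#include <cstdio>

// Simplified age table: sizes[a] holds the bytes of surviving objects of age a.
struct SimpleAgeTable {
  static const size_t table_size = 16;
  size_t sizes[table_size];
};

// Pick the smallest age whose cumulative survivor volume no longer fits into
// the desired survivor space; objects at or above that age would be tenured.
// A real policy would additionally cap the result at MaxTenuringThreshold.
static size_t compute_threshold(const SimpleAgeTable& t, size_t desired_survivor_bytes) {
  size_t total = 0;
  size_t age = 1;
  while (age < SimpleAgeTable::table_size) {
    total += t.sizes[age];
    if (total > desired_survivor_bytes) {
      break;
    }
    age++;
  }
  return age;
}

int main() {
  SimpleAgeTable t = {};
  t.sizes[1] = 8u * 1024 * 1024;   // 8 MB of age-1 survivors
  t.sizes[2] = 6u * 1024 * 1024;   // 6 MB of age-2 survivors
  std::printf("tenuring threshold = %zu\n", compute_threshold(t, 12u * 1024 * 1024));
  return 0;
}

With 8 MB of age-1 and 6 MB of age-2 survivors against a 12 MB budget, the cutoff lands at age 2, so older objects would be promoted at the next pause.
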
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -28,7 +28,6 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1FromCardCache.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
@@ -46,20 +45,108 @@
 #include "utilities/intHisto.hpp"
 #include "utilities/stack.inline.hpp"
 
+// Collects information about the overall remembered set scan progress during an evacuation.
+class G1RemSetScanState : public CHeapObj<mtGC> {
+private:
+  size_t _max_regions;
+
+  // Scan progress for the remembered set of a single region. Transitions from
+  // Unclaimed -> Claimed -> Complete.
+  // At each of the transitions the thread that does the transition needs to perform
+  // some special action once. This is the reason for the extra "Claimed" state.
+  typedef jint G1RemsetIterState;
+
+  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
+  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
+  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.
+
+  G1RemsetIterState volatile* _iter_states;
+  // The current location where the next thread should continue scanning in a region's
+  // remembered set.
+  size_t volatile* _iter_claims;
+
+public:
+  G1RemSetScanState() :
+    _max_regions(0),
+    _iter_states(NULL),
+    _iter_claims(NULL) {
+
+  }
+
+  ~G1RemSetScanState() {
+    if (_iter_states != NULL) {
+      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
+    }
+    if (_iter_claims != NULL) {
+      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
+    }
+  }
+
+  void initialize(uint max_regions) {
+    assert(_iter_states == NULL, "Must not be initialized twice");
+    assert(_iter_claims == NULL, "Must not be initialized twice");
+    _max_regions = max_regions;
+    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
+    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
+  }
+
+  void reset() {
+    for (uint i = 0; i < _max_regions; i++) {
+      _iter_states[i] = Unclaimed;
+    }
+    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
+  }
+
+  // Attempt to claim the remembered set of the region for iteration. Returns true
+  // if this call caused the transition from Unclaimed to Claimed.
+  inline bool claim_iter(uint region) {
+    assert(region < _max_regions, "Tried to access invalid region %u", region);
+    if (_iter_states[region] != Unclaimed) {
+      return false;
+    }
+    jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
+    return (res == Unclaimed);
+  }
+
+  // Tries to atomically set the iteration state to "complete". Returns true for the
+  // thread that caused the transition.
+  inline bool set_iter_complete(uint region) {
+    if (iter_is_complete(region)) {
+      return false;
+    }
+    jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
+    return (res == Claimed);
+  }
+
+  // Returns true if the region's iteration is complete.
+  inline bool iter_is_complete(uint region) const {
+    assert(region < _max_regions, "Tried to access invalid region %u", region);
+    return _iter_states[region] == Complete;
+  }
+
+  // The current position within the remembered set of the given region.
+  inline size_t iter_claimed(uint region) const {
+    assert(region < _max_regions, "Tried to access invalid region %u", region);
+    return _iter_claims[region];
+  }
+
+  // Claim the next block of cards within the remembered set of the region with
+  // the given step size.
+  inline size_t iter_claimed_next(uint region, size_t step) {
+    return Atomic::add(step, &_iter_claims[region]) - step;
+  }
+};
+
 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) :
   _g1(g1),
+  _scan_state(new G1RemSetScanState()),
   _conc_refine_cards(0),
   _ct_bs(ct_bs),
   _g1p(_g1->g1_policy()),
   _cg1r(g1->concurrent_g1_refine()),
-  _cset_rs_update_cl(NULL),
   _prev_period_summary(),
   _into_cset_dirty_card_queue_set(false)
 {
-  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
-  for (uint i = 0; i < n_workers(); i++) {
-    _cset_rs_update_cl[i] = NULL;
-  }
   if (log_is_enabled(Trace, gc, remset)) {
     _prev_period_summary.initialize(this);
   }
@@ -75,10 +162,9 @@
 }
 
 G1RemSet::~G1RemSet() {
-  for (uint i = 0; i < n_workers(); i++) {
-    assert(_cset_rs_update_cl[i] == NULL, "it should be");
+  if (_scan_state != NULL) {
+    delete _scan_state;
   }
-  FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
 }
 
 uint G1RemSet::num_par_rem_sets() {
@@ -87,6 +173,7 @@
 
 void G1RemSet::initialize(size_t capacity, uint max_regions) {
   G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
+  _scan_state->initialize(max_regions);
   {
     GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
     _card_live_data.initialize(capacity, max_regions);
@@ -97,29 +184,29 @@
   }
 }
 
-ScanRSClosure::ScanRSClosure(G1ParPushHeapRSClosure* oc,
-                             CodeBlobClosure* code_root_cl,
-                             uint worker_i) :
-  _oc(oc),
+G1ScanRSClosure::G1ScanRSClosure(G1RemSetScanState* scan_state,
+                                 G1ParPushHeapRSClosure* push_heap_cl,
+                                 CodeBlobClosure* code_root_cl,
+                                 uint worker_i) :
+  _scan_state(scan_state),
+  _push_heap_cl(push_heap_cl),
   _code_root_cl(code_root_cl),
   _strong_code_root_scan_time_sec(0.0),
   _cards(0),
   _cards_done(0),
-  _worker_i(worker_i),
-  _try_claimed(false) {
+  _worker_i(worker_i) {
   _g1h = G1CollectedHeap::heap();
   _bot = _g1h->bot();
   _ct_bs = _g1h->g1_barrier_set();
   _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
 }
 
-void ScanRSClosure::scanCard(size_t index, HeapRegion *r) {
+void G1ScanRSClosure::scan_card(size_t index, HeapRegion *r) {
   // Stack allocate the DirtyCardToOopClosure instance
-  HeapRegionDCTOC cl(_g1h, r, _oc,
-      CardTableModRefBS::Precise);
+  HeapRegionDCTOC cl(_g1h, r, _push_heap_cl, CardTableModRefBS::Precise);
 
   // Set the "from" region in the closure.
-  _oc->set_region(r);
+  _push_heap_cl->set_region(r);
   MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
   MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
   MemRegion mr = pre_gc_allocated.intersection(card_region);
@@ -133,37 +220,39 @@
   }
 }
 
-void ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
+void G1ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
   double scan_start = os::elapsedTime();
   r->strong_code_roots_do(_code_root_cl);
   _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
 }
 
-bool ScanRSClosure::doHeapRegion(HeapRegion* r) {
+bool G1ScanRSClosure::doHeapRegion(HeapRegion* r) {
   assert(r->in_collection_set(), "should only be called on elements of CS.");
-  HeapRegionRemSet* hrrs = r->rem_set();
-  if (hrrs->iter_is_complete()) return false; // All done.
-  if (!_try_claimed && !hrrs->claim_iter()) return false;
-  // If we ever free the collection set concurrently, we should also
-  // clear the card table concurrently therefore we won't need to
-  // add regions of the collection set to the dirty cards region.
-  _g1h->push_dirty_cards_region(r);
-  // If we didn't return above, then
-  //   _try_claimed || r->claim_iter()
-  // is true: either we're supposed to work on claimed-but-not-complete
-  // regions, or we successfully claimed the region.
+  uint region_idx = r->hrm_index();
 
-  HeapRegionRemSetIterator iter(hrrs);
+  if (_scan_state->iter_is_complete(region_idx)) {
+    return false;
+  }
+  if (_scan_state->claim_iter(region_idx)) {
+    // If we ever free the collection set concurrently, we should also
+    // clear the card table concurrently; therefore we won't need to
+    // add regions of the collection set to the dirty cards region.
+    _g1h->push_dirty_cards_region(r);
+  }
+
+  HeapRegionRemSetIterator iter(r->rem_set());
   size_t card_index;
 
   // We claim cards in block so as to reduce the contention. The block size is determined by
   // the G1RSetScanBlockSize parameter.
-  size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
+  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, _block_size);
   for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
-    if (current_card >= jump_to_card + _block_size) {
-      jump_to_card = hrrs->iter_claimed_next(_block_size);
+    if (current_card >= claimed_card_block + _block_size) {
+      claimed_card_block = _scan_state->iter_claimed_next(region_idx, _block_size);
     }
-    if (current_card < jump_to_card) continue;
+    if (current_card < claimed_card_block) {
+      continue;
+    }
     HeapWord* card_start = _g1h->bot()->address_for_index(card_index);
 
     HeapRegion* card_region = _g1h->heap_region_containing(card_start);
@@ -176,38 +265,33 @@
     // If the card is dirty, then we will scan it during updateRS.
     if (!card_region->in_collection_set() &&
         !_ct_bs->is_card_dirty(card_index)) {
-      scanCard(card_index, card_region);
+      scan_card(card_index, card_region);
     }
   }
-  if (!_try_claimed) {
+  if (_scan_state->set_iter_complete(region_idx)) {
     // Scan the strong code root list attached to the current region
     scan_strong_code_roots(r);
-
-    hrrs->set_iter_complete();
   }
   return false;
 }
 
-size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                        CodeBlobClosure* heap_region_codeblobs,
-                        uint worker_i) {
+size_t G1RemSet::scan_rem_set(G1ParPushHeapRSClosure* oops_in_heap_closure,
+                              CodeBlobClosure* heap_region_codeblobs,
+                              uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);
+  G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
+  _g1->collection_set_iterate_from(startRegion, &cl);
 
-  _g1->collection_set_iterate_from(startRegion, &scanRScl);
-  scanRScl.set_try_claimed();
-  _g1->collection_set_iterate_from(startRegion, &scanRScl);
-
-  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
-                            - scanRScl.strong_code_root_scan_time_sec();
+  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
+                             cl.strong_code_root_scan_time_sec();
 
   _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
-  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());
+  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
 
-  return scanRScl.cards_done();
+  return cl.cards_done();
 }
 
 // Closure used for updating RSets and recording references that
@@ -217,10 +301,12 @@
 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
   G1RemSet* _g1rs;
   DirtyCardQueue* _into_cset_dcq;
+  G1ParPushHeapRSClosure* _cl;
 public:
   RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
-                                              DirtyCardQueue* into_cset_dcq) :
-    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
+                                              DirtyCardQueue* into_cset_dcq,
+                                              G1ParPushHeapRSClosure* cl) :
+    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq), _cl(cl)
   {}
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
@@ -231,7 +317,7 @@
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
     assert(worker_i < ParallelGCThreads, "should be a GC worker");
 
-    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
+    if (_g1rs->refine_card(card_ptr, worker_i, _cl)) {
       // 'card_ptr' contains references that point into the collection
       // set. We need to record the card in the DCQS
       // (_into_cset_dirty_card_queue_set)
@@ -244,8 +330,10 @@
   }
 };
 
-void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
-  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
+void G1RemSet::update_rem_set(DirtyCardQueue* into_cset_dcq,
+                              G1ParPushHeapRSClosure* oops_in_heap_closure,
+                              uint worker_i) {
+  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq, oops_in_heap_closure);
 
   G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
   if (ConcurrentG1Refine::hot_card_cache_enabled()) {
@@ -261,14 +349,9 @@
   HeapRegionRemSet::cleanup();
 }
 
-size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
+size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* cl,
                                              CodeBlobClosure* heap_region_codeblobs,
                                              uint worker_i) {
-  // We cache the value of 'oc' closure into the appropriate slot in the
-  // _cset_rs_update_cl for this worker
-  assert(worker_i < n_workers(), "sanity");
-  _cset_rs_update_cl[worker_i] = oc;
-
   // A DirtyCardQueue that is used to hold cards containing references
   // that point into the collection set. This DCQ is associated with a
   // special DirtyCardQueueSet (see g1CollectedHeap.hpp).  Under normal
@@ -280,18 +363,16 @@
   // DirtyCardQueueSet that is used to manage RSet updates
   DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
 
-  updateRS(&into_cset_dcq, worker_i);
-  size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);
-
-  // We now clear the cached values of _cset_rs_update_cl for this worker
-  _cset_rs_update_cl[worker_i] = NULL;
-  return cards_scanned;
+  update_rem_set(&into_cset_dcq, cl, worker_i);
+  return scan_rem_set(cl, heap_region_codeblobs, worker_i);
 }
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
   _g1->set_refine_cte_cl_concurrency(false);
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
+
+  _scan_state->reset();
 }
 
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
@@ -366,8 +447,9 @@
 // into the collection set, if we're checking for such references;
 // false otherwise.
 
-bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
-                           bool check_for_refs_into_cset) {
+bool G1RemSet::refine_card(jbyte* card_ptr,
+                           uint worker_i,
+                           G1ParPushHeapRSClosure* oops_in_heap_closure) {
   assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
          p2i(card_ptr),
@@ -375,6 +457,8 @@
          p2i(_ct_bs->addr_for(card_ptr)),
          _g1->addr_to_region(_ct_bs->addr_for(card_ptr)));
 
+  bool check_for_refs_into_cset = oops_in_heap_closure != NULL;
+
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
     // No need to return that this card contains refs that point
@@ -451,15 +535,6 @@
   HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
   MemRegion dirtyRegion(start, end);
 
-  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
-  if (check_for_refs_into_cset) {
-    // ConcurrentG1RefineThreads have worker numbers larger than what
-    // _cset_rs_update_cl[] is set up to handle. But those threads should
-    // only be active outside of a collection which means that when they
-    // reach here they should have check_for_refs_into_cset == false.
-    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
-    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
-  }
   G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                  _g1->g1_rem_set(),
                                                  oops_in_heap_closure,
@@ -579,7 +654,7 @@
     hot_card_cache->set_use_cache(false);
 
     DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
-    updateRS(&into_cset_dcq, 0);
+    update_rem_set(&into_cset_dcq, NULL, 0);
     _into_cset_dirty_card_queue_set.clear();
 
     hot_card_cache->set_use_cache(use_hot_card_cache);
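
For reference, the claiming protocol that G1RemSetScanState centralizes in this file (a per-region Unclaimed -> Claimed -> Complete state plus a card cursor handed out in blocks) can be reduced to the following stand-alone sketch. It uses std::atomic as a stand-in for HotSpot's Atomic::cmpxchg/Atomic::add; the class and method names are illustrative only.

#include <atomic>
#include <cstddef>
#include <vector>

// Per-region scan progress: Unclaimed -> Claimed -> Complete.
enum ScanState { Unclaimed = 0, Claimed = 1, Complete = 2 };

class ScanProgress {
  std::vector<std::atomic<int> >    _states;   // one claim state per region
  std::vector<std::atomic<size_t> > _cursors;  // next unclaimed card per region
public:
  explicit ScanProgress(size_t regions) : _states(regions), _cursors(regions) {
    reset();
  }

  void reset() {
    for (size_t i = 0; i < _states.size(); i++) {
      _states[i].store(Unclaimed);
      _cursors[i].store(0);
    }
  }

  // True only for the thread that moves the region from Unclaimed to Claimed,
  // so once-per-region setup work runs exactly once.
  bool claim(size_t region) {
    int expected = Unclaimed;
    return _states[region].compare_exchange_strong(expected, Claimed);
  }

  // True only for the thread that moves the region from Claimed to Complete,
  // guarding the once-per-region completion work.
  bool set_complete(size_t region) {
    int expected = Claimed;
    return _states[region].compare_exchange_strong(expected, Complete);
  }

  // Hand out the next block of 'step' cards; concurrent callers receive
  // disjoint, monotonically increasing ranges.
  size_t claim_next_block(size_t region, size_t step) {
    return _cursors[region].fetch_add(step);
  }
};

A worker loop would then mirror G1ScanRSClosure::doHeapRegion(): claim(region) guards the dirty-cards-region registration, claim_next_block() feeds the card loop in G1RSetScanBlockSize-sized chunks, and set_complete(region) ensures the strong code roots of a region are scanned by exactly one thread.
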
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,9 @@
 class ConcurrentG1Refine;
 class CodeBlobClosure;
 class G1CollectedHeap;
-class G1CollectorPolicy;
 class G1ParPushHeapRSClosure;
+class G1RemSetScanState;
+class G1Policy;
 class G1SATBCardTableModRefBS;
 class HeapRegionClaimer;
 
@@ -51,6 +52,7 @@
 // so that they can be used to update the individual region remsets.
 class G1RemSet: public CHeapObj<mtGC> {
 private:
+  G1RemSetScanState* _scan_state;
   G1CardLiveData _card_live_data;
 
   G1RemSetSummary _prev_period_summary;
@@ -68,14 +70,10 @@
 
 protected:
   CardTableModRefBS*     _ct_bs;
-  G1CollectorPolicy*     _g1p;
+  G1Policy*              _g1p;
 
   ConcurrentG1Refine*    _cg1r;
 
-  // Used for caching the closure that is responsible for scanning
-  // references into the collection set.
-  G1ParPushHeapRSClosure** _cset_rs_update_cl;
-
 public:
   // Gives an approximation on how many threads can be expected to add records to
   // a remembered set in parallel. This can be used for sizing data structures to
@@ -95,9 +93,9 @@
   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
   ~G1RemSet();
 
-  // Invoke "blk->do_oop" on all pointers into the collection set
+  // Invoke "cl->do_oop" on all pointers into the collection set
   // from objects in regions outside the collection set (having
-  // invoked "blk->set_region" to set the "from" region correctly
+  // invoked "cl->set_region" to set the "from" region correctly
   // beforehand.)
   //
   // Apply non_heap_roots on the oops of the unmarked nmethods
@@ -112,7 +110,7 @@
   //
   // Returns the number of cards scanned while looking for pointers
   // into the collection set.
-  size_t oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
+  size_t oops_into_collection_set_do(G1ParPushHeapRSClosure* cl,
                                      CodeBlobClosure* heap_region_codeblobs,
                                      uint worker_i);
 
@@ -124,13 +122,15 @@
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
 
-  size_t scanRS(G1ParPushHeapRSClosure* oc,
-                CodeBlobClosure* heap_region_codeblobs,
-                uint worker_i);
+  size_t scan_rem_set(G1ParPushHeapRSClosure* oops_in_heap_closure,
+                      CodeBlobClosure* heap_region_codeblobs,
+                      uint worker_i);
 
-  void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
+  G1RemSetScanState* scan_state() const { return _scan_state; }
 
-  CardTableModRefBS* ct_bs() { return _ct_bs; }
+  // Flush remaining refinement buffers into the remembered set,
+  // applying oops_in_heap_closure on the references found.
+  void update_rem_set(DirtyCardQueue* into_cset_dcq, G1ParPushHeapRSClosure* oops_in_heap_closure, uint worker_i);
 
   // Record, if necessary, the fact that *p (where "p" is in region "from",
   // which is required to be non-NULL) has changed to a new non-NULL value.
@@ -145,12 +145,12 @@
   void scrub(uint worker_num, HeapRegionClaimer* hrclaimer);
 
   // Refine the card corresponding to "card_ptr".
-  // If check_for_refs_into_cset is true, a true result is returned
+  // If oops_in_heap_closure is not NULL, a true result is returned
   // if the given card contains oops that have references into the
   // current collection set.
   virtual bool refine_card(jbyte* card_ptr,
                            uint worker_i,
-                           bool check_for_refs_into_cset);
+                           G1ParPushHeapRSClosure* oops_in_heap_closure);
 
   // Print accumulated summary info from the start of the VM.
   virtual void print_summary_info();
@@ -179,11 +179,14 @@
 #endif
 };
 
-class ScanRSClosure : public HeapRegionClosure {
-  size_t _cards_done, _cards;
+class G1ScanRSClosure : public HeapRegionClosure {
+  G1RemSetScanState* _scan_state;
+
+  size_t _cards_done;
+  size_t _cards;
   G1CollectedHeap* _g1h;
 
-  G1ParPushHeapRSClosure* _oc;
+  G1ParPushHeapRSClosure* _push_heap_cl;
   CodeBlobClosure* _code_root_cl;
 
   G1BlockOffsetTable* _bot;
@@ -192,26 +195,23 @@
   double _strong_code_root_scan_time_sec;
   uint   _worker_i;
   size_t _block_size;
-  bool   _try_claimed;
 
+  void scan_card(size_t index, HeapRegion *r);
+  void scan_strong_code_roots(HeapRegion* r);
 public:
-  ScanRSClosure(G1ParPushHeapRSClosure* oc,
-                CodeBlobClosure* code_root_cl,
-                uint worker_i);
+  G1ScanRSClosure(G1RemSetScanState* scan_state,
+                  G1ParPushHeapRSClosure* push_heap_cl,
+                  CodeBlobClosure* code_root_cl,
+                  uint worker_i);
 
   bool doHeapRegion(HeapRegion* r);
 
   double strong_code_root_scan_time_sec() {
     return _strong_code_root_scan_time_sec;
   }
+
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
-  void set_try_claimed() { _try_claimed = true; }
-private:
-  void scanCard(size_t index, HeapRegion *r);
-  void printCard(HeapRegion* card_region, size_t card_index,
-                 HeapWord* card_start);
-  void scan_strong_code_roots(HeapRegion* r);
 };
 
 class UpdateRSOopClosure: public ExtendedOopClosure {
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -30,9 +30,9 @@
 #include "gc/g1/bufferingOopClosure.hpp"
 #include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -108,13 +108,18 @@
   assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
 }
 
-uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
+void G1YoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
+
   // We need to pass the desired values because recalculation may not update these
   // values in some cases.
   uint temp = _min_desired_young_length;
   uint result = _max_desired_young_length;
   recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
-  return result;
+
+  size_t max_young_size = result * HeapRegion::GrainBytes;
+  if (max_young_size != MaxNewSize) {
+    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+  }
 }
 
 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
--- a/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -22,6 +22,9 @@
  *
  */
 
+#ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
+#define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
+
 #include "memory/allocation.hpp"
 
 // There are three command line options related to the young gen size:
@@ -60,7 +63,7 @@
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer : public CHeapObj<mtGC> {
+class G1YoungGenSizer VALUE_OBJ_CLASS_SPEC {
 private:
   enum SizerKind {
     SizerDefaults,
@@ -84,13 +87,13 @@
   G1YoungGenSizer();
   // Calculate the maximum length of the young gen given the number of regions
   // depending on the sizing algorithm.
-  uint max_young_length(uint number_of_heap_regions);
+  void adjust_max_new_size(uint number_of_heap_regions);
 
   void heap_size_changed(uint new_number_of_heap_regions);
-  uint min_desired_young_length() {
+  uint min_desired_young_length() const {
     return _min_desired_young_length;
   }
-  uint max_desired_young_length() {
+  uint max_desired_young_length() const {
     return _max_desired_young_length;
   }
 
@@ -99,3 +102,4 @@
   }
 };
 
+#endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
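
adjust_max_new_size() above folds the old max_young_length() query and the ergonomic update of MaxNewSize into one call. A rough stand-alone sketch of that pattern is shown below; the percentage-based recalculation, the 4 MB region size and the plain pointer standing in for FLAG_SET_ERGO are assumptions made for the example, not G1's actual sizing rules.

#include <cstddef>
#include <cstdio>

static const size_t kRegionBytes = 4u * 1024 * 1024;  // stand-in for HeapRegion::GrainBytes

// Placeholder for recalculate_min_max_young_length(): derive region counts
// from fixed fractions of the heap, keeping min <= max.
static void recalc_young_bounds(unsigned heap_regions, unsigned* min_len, unsigned* max_len) {
  *min_len = heap_regions * 5 / 100 + 1;   // ~5% of the heap, at least one region
  *max_len = heap_regions * 60 / 100;      // ~60% of the heap
  if (*max_len < *min_len) {
    *max_len = *min_len;
  }
}

// Mirrors the adjust_max_new_size() idea: recompute the maximum young length
// for the current heap and update the flag only when the value changed.
static void adjust_max_new_size(unsigned heap_regions, size_t* max_new_size_flag) {
  unsigned min_len = 0;
  unsigned max_len = 0;
  recalc_young_bounds(heap_regions, &min_len, &max_len);
  size_t max_young_bytes = (size_t)max_len * kRegionBytes;
  if (max_young_bytes != *max_new_size_flag) {
    *max_new_size_flag = max_young_bytes;  // FLAG_SET_ERGO(size_t, MaxNewSize, ...) in HotSpot
  }
}

int main() {
  size_t max_new_size = 0;
  adjust_max_new_size(2048, &max_new_size);  // an 8 GB heap with 4 MB regions
  std::printf("MaxNewSize would be set to %zu bytes\n", max_new_size);
  return 0;
}
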
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,8 +24,8 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -74,7 +74,7 @@
 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
   SuspendibleThreadSetJoiner sts;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
+  G1Policy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
     HeapRegion* hr = g1h->young_list()->first_region();
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -37,7 +37,7 @@
 // The assumption is that a significant part of the GC is spent on scanning
 // the remembered sets (and many other components), so this thread constantly
 // reevaluates the prediction for the remembered set scanning costs, and potentially
-// G1CollectorPolicy resizes the young gen. This may do a premature GC or even
+// G1Policy resizes the young gen. This may trigger a premature GC or even
 // increase the young gen size to keep pause time length goal.
 class G1YoungRemSetSamplingThread: public ConcurrentGCThread {
 private:
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -112,8 +112,7 @@
   product(size_t, G1ConcRefinementRedZone, 0,                               \
           "Maximum number of enqueued update buffers before mutator "       \
           "threads start processing new ones instead of enqueueing them. "  \
-          "Will be selected ergonomically by default. Zero will disable "   \
-          "concurrent processing.")                                         \
+          "Will be selected ergonomically by default.")                     \
           range(0, max_intx)                                                \
                                                                             \
   product(size_t, G1ConcRefinementGreenZone, 0,                             \
@@ -127,11 +126,12 @@
           "specified number of milliseconds to do miscellaneous work.")     \
           range(0, max_jint)                                                \
                                                                             \
-  product(size_t, G1ConcRefinementThresholdStep, 0,                         \
+  product(size_t, G1ConcRefinementThresholdStep, 2,                         \
           "Each time the rset update queue increases by this amount "       \
           "activate the next refinement thread if available. "              \
-          "Will be selected ergonomically by default.")                     \
-          range(0, SIZE_MAX)                                                \
+          "The actual step size will be selected ergonomically by "         \
+          "default, with this value used to determine a lower bound.")      \
+          range(1, SIZE_MAX)                                                \
                                                                             \
   product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
@@ -201,9 +201,9 @@
           range(0, 32*M)                                                    \
           constraint(G1HeapRegionSizeConstraintFunc,AfterMemoryInit)        \
                                                                             \
-  product(uintx, G1ConcRefinementThreads, 0,                                \
-          "If non-0 is the number of parallel rem set update threads, "     \
-          "otherwise the value is determined ergonomically.")               \
+  product(uint, G1ConcRefinementThreads, 0,                                 \
+          "The number of parallel rem set update threads. "                 \
+          "Will be set ergonomically by default.")                          \
           range(0, (max_jint-1)/wordSize)                                   \
                                                                             \
   develop(bool, G1VerifyCTCleanup, false,                                   \
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -187,6 +187,7 @@
   zero_marked_bytes();
 
   init_top_at_mark_start();
+  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
@@ -204,7 +205,7 @@
   // GC efficiency is the ratio of how much space would be
   // reclaimed over how long we predict it would take to reclaim it.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
+  G1Policy* g1p = g1h->g1_policy();
 
   // Retrieve a prediction of the elapsed time for this region for
   // a mixed gc because the region will only be evacuated during a
@@ -1044,7 +1045,7 @@
 
 void G1ContiguousSpace::record_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
+  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
   if (_gc_time_stamp < curr_gc_time_stamp) {
     // Setting the time stamp here tells concurrent readers to look at
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@
  protected:
   G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
-  volatile unsigned _gc_time_stamp;
+  volatile uint _gc_time_stamp;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that
@@ -174,7 +174,7 @@
   HeapWord* scan_top() const;
   void record_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
-  unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+  uint get_gc_time_stamp() { return _gc_time_stamp; }
   void record_retained_region();
 
   // See the comment above in the declaration of _pre_dummy_top for an
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -692,8 +692,8 @@
                                    HeapRegion* hr)
   : _bot(bot),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
-    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
-  reset_for_par_iteration();
+    _code_roots(),
+    _other_regions(hr, &_m) {
 }
 
 void HeapRegionRemSet::setup_remset_size() {
@@ -710,20 +710,6 @@
   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 }
 
-bool HeapRegionRemSet::claim_iter() {
-  if (_iter_state != Unclaimed) return false;
-  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
-  return (res == Unclaimed);
-}
-
-void HeapRegionRemSet::set_iter_complete() {
-  _iter_state = Complete;
-}
-
-bool HeapRegionRemSet::iter_is_complete() {
-  return _iter_state == Complete;
-}
-
 #ifndef PRODUCT
 void HeapRegionRemSet::print() {
   HeapRegionRemSetIterator iter(this);
@@ -760,14 +746,6 @@
   _code_roots.clear();
   _other_regions.clear();
   assert(occupied_locked() == 0, "Should be clear.");
-  reset_for_par_iteration();
-}
-
-void HeapRegionRemSet::reset_for_par_iteration() {
-  _iter_state = Unclaimed;
-  _iter_claimed = 0;
-  // It's good to check this to make sure that the two methods are in sync.
-  assert(verify_ready_for_par_iteration(), "post-condition");
 }
 
 void HeapRegionRemSet::scrub(G1CardLiveData* live_data) {
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,10 +185,6 @@
 
   OtherRegionsTable _other_regions;
 
-  enum ParIterState { Unclaimed, Claimed, Complete };
-  volatile ParIterState _iter_state;
-  volatile size_t _iter_claimed;
-
 public:
   HeapRegionRemSet(G1BlockOffsetTable* bot, HeapRegion* hr);
 
@@ -240,27 +236,6 @@
   void clear();
   void clear_locked();
 
-  // Attempt to claim the region.  Returns true iff this call caused an
-  // atomic transition from Unclaimed to Claimed.
-  bool claim_iter();
-  // Sets the iteration state to "complete".
-  void set_iter_complete();
-  // Returns "true" iff the region's iteration is complete.
-  bool iter_is_complete();
-
-  // Support for claiming blocks of cards during iteration
-  size_t iter_claimed() const { return _iter_claimed; }
-  // Claim the next block of cards
-  size_t iter_claimed_next(size_t step) {
-    return Atomic::add(step, &_iter_claimed) - step;
-  }
-
-  void reset_for_par_iteration();
-
-  bool verify_ready_for_par_iteration() {
-    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
-  }
-
   // The actual # of bytes this hr_remset takes up.
   // Note also includes the strong code root set.
   size_t mem_size() {
--- a/hotspot/src/share/vm/gc/g1/heapRegionSet.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegionSet.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -38,7 +38,6 @@
   assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name());
   assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
          "Empty region %u is not free or archive for set %s", hr->hrm_index(), name());
-  assert(hr->rem_set()->verify_ready_for_par_iteration(), "Wrong iteration state %u", hr->hrm_index());
 }
 #endif
 
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcTimer.hpp"
--- a/hotspot/src/share/vm/gc/g1/youngList.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/g1/youngList.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -40,6 +40,7 @@
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #endif // INCLUDE_ALL_GCS
 
 VM_GC_Operation::~VM_GC_Operation() {
--- a/hotspot/src/share/vm/logging/logConfiguration.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/logging/logConfiguration.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -382,7 +382,7 @@
   return true;
 }
 
-void LogConfiguration::describe(outputStream* out) {
+void LogConfiguration::describe_available(outputStream* out) {
   out->print("Available log levels:");
   for (size_t i = 0; i < LogLevel::Count; i++) {
     out->print("%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
@@ -402,7 +402,9 @@
   }
   out->cr();
 
-  ConfigurationLock cl;
+}
+
+void LogConfiguration::describe_current_configuration(outputStream* out) {
   out->print_cr("Log output configuration:");
   for (size_t i = 0; i < _n_outputs; i++) {
     out->print("#" SIZE_FORMAT ": %s %s ", i, _outputs[i]->name(), _outputs[i]->config_string());
@@ -416,6 +418,12 @@
   }
 }
 
+void LogConfiguration::describe(outputStream* out) {
+  describe_available(out);
+  ConfigurationLock cl;
+  describe_current_configuration(out);
+}
+
 void LogConfiguration::print_command_line_help(FILE* out) {
   jio_fprintf(out, "-Xlog Usage: -Xlog[:[what][:[output][:[decorators][:output-options]]]]\n"
               "\t where 'what' is a combination of tags and levels on the form tag1[+tag2...][*][=level][,...]\n"
--- a/hotspot/src/share/vm/logging/logConfiguration.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/logging/logConfiguration.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -37,6 +37,7 @@
 // kept implicitly in the LogTagSets and their LogOutputLists. During configuration the tagsets
 // are iterated over and updated accordingly.
 class LogConfiguration : public AllStatic {
+ friend class VMError;
  public:
   // Function for listeners
   typedef void (*UpdateListenerFunction)(void);
@@ -79,6 +80,11 @@
   // This should be called after any configuration change while still holding ConfigurationLock
   static void notify_update_listeners();
 
+  // Describe the built-in and the runtime-dependent portions of the configuration, respectively.
+  static void describe_available(outputStream* out);
+  static void describe_current_configuration(outputStream* out);
+
+
  public:
   // Initialization and finalization of log configuration, to be run at vm startup and shutdown respectively.
   static void initialize(jlong vm_start_time);
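
Splitting describe() into describe_available() and describe_current_configuration() separates output that is fixed after startup from output that must be read under the ConfigurationLock; the new VMError friendship suggests the error reporter wants to call the two halves individually. Below is a generic sketch of the same lock-scoping pattern, using a std::mutex in place of ConfigurationLock and placeholder strings instead of the real -Xlog output.

#include <cstdio>
#include <mutex>

static std::mutex config_mutex;  // stand-in for ConfigurationLock

// Built-in information: constant after startup, safe to print without the lock.
static void describe_available(std::FILE* out) {
  std::fprintf(out, "Available log levels: trace, debug, info, warning, error\n");
  std::fprintf(out, "Available log decorators: time, uptime, level, tags\n");
}

// Current configuration: mutable state, read only while holding the lock.
static void describe_current_configuration(std::FILE* out) {
  std::fprintf(out, "Log output configuration:\n");
  std::fprintf(out, "#0: stdout all=warning uptime,level,tags\n");
}

// The public entry point keeps the old behaviour, but the lock now covers
// only the half that actually needs it.
static void describe(std::FILE* out) {
  describe_available(out);
  std::lock_guard<std::mutex> guard(config_mutex);
  describe_current_configuration(out);
}

int main() {
  describe(stdout);
  return 0;
}
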
--- a/hotspot/src/share/vm/logging/logPrefix.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/logging/logPrefix.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -55,6 +55,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, cset)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, heap)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, refine)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
--- a/hotspot/src/share/vm/logging/logTag.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/logging/logTag.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -62,6 +62,7 @@
   LOG_TAG(ihop) \
   LOG_TAG(itables) \
   LOG_TAG(jni) \
+  LOG_TAG(jvmti) \
   LOG_TAG(liveness) \
   LOG_TAG(logging) \
   LOG_TAG(marking) \
@@ -69,6 +70,7 @@
   LOG_TAG(modules) \
   LOG_TAG(monitorinflation) \
   LOG_TAG(monitormismatch) \
+  LOG_TAG(objecttagging) \
   LOG_TAG(os) \
   LOG_TAG(pagesize) \
   LOG_TAG(phases) \
--- a/hotspot/src/share/vm/memory/iterator.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -355,6 +355,9 @@
   // Read/write the void pointer pointed to by p.
   virtual void do_ptr(void** p) = 0;
 
+  // Read/write the 32-bit unsigned integer pointed to by p.
+  virtual void do_u4(u4* p) = 0;
+
   // Read/write the region specified.
   virtual void do_region(u_char* start, size_t size) = 0;
 
@@ -363,6 +366,10 @@
   // for verification that sections of the serialized data are of the
   // correct length.
   virtual void do_tag(int tag) = 0;
+
+  bool writing() {
+    return !reading();
+  }
 };
 
 class SymbolClosure : public StackObj {
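
The new do_u4() hook (and the writing() convenience) extends the SerializeClosure protocol to 32-bit values; the WriteClosure and ReadClosure changes later in this changeset widen a u4 into a pointer-sized archive slot on dump and narrow it back on restore. The stand-alone sketch below shows that symmetric widen/narrow pattern against an in-memory buffer; the Writer and Reader types are made up for the example and are not the HotSpot closures.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

typedef uint32_t u4;

// Dump side: every serialized value occupies one pointer-sized slot.
struct Writer {
  std::vector<intptr_t> slots;
  void do_ptr(void** p) { slots.push_back((intptr_t)*p); }
  void do_u4(u4* p)     { slots.push_back((intptr_t)(uintptr_t)*p); }  // widen u4 -> slot
  void do_tag(int tag)  { slots.push_back((intptr_t)tag); }
};

// Restore side: walks the same slots in the same order, narrowing where needed.
struct Reader {
  const std::vector<intptr_t>* slots;
  size_t pos;
  intptr_t next()       { return (*slots)[pos++]; }
  void do_ptr(void** p) { *p = (void*)next(); }
  void do_u4(u4* p)     { *p = (u4)(uintptr_t)next(); }                // narrow slot -> u4
  void do_tag(int tag)  { intptr_t old_tag = next(); assert(old_tag == tag); (void)old_tag; }
};

int main() {
  u4 num_entries = 2053;

  Writer w;
  w.do_tag(-1);
  w.do_u4(&num_entries);

  u4 restored = 0;
  Reader r = { &w.slots, 0 };
  r.do_tag(-1);
  r.do_u4(&restored);

  std::printf("restored entry count: %u\n", (unsigned)restored);
  return 0;
}

Dump and restore must visit the values in exactly the same order, which is why MetaspaceShared::serialize() interleaves do_tag() checkpoints with the table data it writes.
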
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -31,6 +31,7 @@
 #include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
 #include "code/codeCache.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -106,7 +107,8 @@
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
-void MetaspaceShared::serialize(SerializeClosure* soc) {
+void MetaspaceShared::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
+                                size_t* space_size) {
   int tag = 0;
   soc->do_tag(--tag);
 
@@ -128,6 +130,15 @@
   vmSymbols::serialize(soc);
   soc->do_tag(--tag);
 
+  // Dump/restore the symbol and string tables
+  SymbolTable::serialize(soc);
+  StringTable::serialize(soc, string_space, space_size);
+  soc->do_tag(--tag);
+
+  // Dump/restore the misc information for system dictionary
+  SystemDictionaryShared::serialize(soc);
+  soc->do_tag(--tag);
+
   soc->do_tag(666);
 }
 
@@ -314,6 +325,11 @@
     ++top;
   }
 
+  void do_u4(u4* p) {
+    void* ptr = (void*)(uintx(*p));
+    do_ptr(&ptr);
+  }
+
   void do_tag(int tag) {
     check_space();
     *top = (intptr_t)tag;
@@ -348,6 +364,8 @@
   METASPACE_OBJ_TYPES_DO(f) \
   f(SymbolHashentry) \
   f(SymbolBucket) \
+  f(StringHashentry) \
+  f(StringBucket) \
   f(Other)
 
 #define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
@@ -406,13 +424,22 @@
   MetaspaceSharedStats *stats = MetaspaceShared::stats();
 
   // symbols
-  _counts[RW][SymbolHashentryType] = stats->symbol.hashentry_count;
-  _bytes [RW][SymbolHashentryType] = stats->symbol.hashentry_bytes;
-  other_bytes -= stats->symbol.hashentry_bytes;
+  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
+  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
+  _bytes [RO][TypeArrayU4Type]    -= stats->symbol.hashentry_bytes;
+
+  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
+  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
+  _bytes [RO][TypeArrayU4Type] -= stats->symbol.bucket_bytes;
 
-  _counts[RW][SymbolBucketType] = stats->symbol.bucket_count;
-  _bytes [RW][SymbolBucketType] = stats->symbol.bucket_bytes;
-  other_bytes -= stats->symbol.bucket_bytes;
+  // strings
+  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
+  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
+  _bytes [RO][TypeArrayU4Type]    -= stats->string.hashentry_bytes;
+
+  _counts[RO][StringBucketType] = stats->string.bucket_count;
+  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
+  _bytes [RO][TypeArrayU4Type] -= stats->string.bucket_bytes;
 
   // TODO: count things like dictionary, vtable, etc
   _bytes[RW][OtherType] =  other_bytes;
@@ -488,7 +515,6 @@
   GrowableArray<Klass*> *_class_promote_order;
   VirtualSpace _md_vs;
   VirtualSpace _mc_vs;
-  CompactHashtableWriter* _string_cht;
   GrowableArray<MemRegion> *_string_regions;
 
 public:
@@ -600,39 +626,27 @@
   // Not doing this either.
 
   SystemDictionary::reorder_dictionary();
-
   NOT_PRODUCT(SystemDictionary::verify();)
-
-  // Copy the symbol table, string table, and the system dictionary to the shared
-  // space in usable form.  Copy the hashtable
-  // buckets first [read-write], then copy the linked lists of entries
-  // [read-only].
-
-  NOT_PRODUCT(SymbolTable::verify());
-  handle_misc_data_space_failure(SymbolTable::copy_compact_table(&md_top, md_end));
-
-  size_t ss_bytes = 0;
-  char* ss_low;
-  // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
-  _string_regions = new GrowableArray<MemRegion>(2);
-  NOT_PRODUCT(StringTable::verify());
-  handle_misc_data_space_failure(StringTable::copy_compact_table(&md_top, md_end, _string_regions,
-                                                                 &ss_bytes));
-  ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
-
   SystemDictionary::reverse();
   SystemDictionary::copy_buckets(&md_top, md_end);
 
   SystemDictionary::copy_table(&md_top, md_end);
 
   // Write the other data to the output array.
+  // SymbolTable, StringTable and extra information for system dictionary
+  NOT_PRODUCT(SymbolTable::verify());
+  NOT_PRODUCT(StringTable::verify());
+  size_t ss_bytes = 0;
+  char* ss_low;
+  // The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
+  _string_regions = new GrowableArray<MemRegion>(2);
+
   WriteClosure wc(md_top, md_end);
-  MetaspaceShared::serialize(&wc);
+  MetaspaceShared::serialize(&wc, _string_regions, &ss_bytes);
   md_top = wc.get_top();
+  ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
 
   // Print shared spaces all the time
-// To make fmt_space be a syntactic constant (for format warnings), use #define.
-#define fmt_space "%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%4.1f%% used] at " INTPTR_FORMAT
   Metaspace* ro_space = _loader_data->ro_metaspace();
   Metaspace* rw_space = _loader_data->rw_metaspace();
 
@@ -665,12 +679,13 @@
   const double mc_u_perc = mc_bytes / double(mc_alloced) * 100.0;
   const double total_u_perc = total_bytes / double(total_alloced) * 100.0;
 
+#define fmt_space "%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT
   tty->print_cr(fmt_space, "ro", ro_bytes, ro_t_perc, ro_alloced, ro_u_perc, p2i(ro_space->bottom()));
   tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, p2i(rw_space->bottom()));
   tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, p2i(md_low));
   tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, p2i(mc_low));
   tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes,   100.0,     p2i(ss_low));
-  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%4.1f%% used]",
+  tty->print_cr("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                  total_bytes, total_alloced, total_u_perc);
 
   // Update the vtable pointers in all of the Klass objects in the
@@ -974,6 +989,11 @@
     *p = (void*)obj;
   }
 
+  void do_u4(u4* p) {
+    intptr_t obj = nextPtr();
+    *p = (u4)(uintx(obj));
+  }
+
   void do_tag(int tag) {
     int old_tag;
     old_tag = (int)(intptr_t)nextPtr();
@@ -1097,21 +1117,6 @@
   buffer += sizeof(intptr_t);
   buffer += vtable_size;
 
-  // Create the shared symbol table using the compact table at this spot in the
-  // misc data space. (Todo: move this to read-only space. Currently
-  // this is mapped copy-on-write but will never be written into).
-
-  buffer = (char*)SymbolTable::init_shared_table(buffer);
-  SymbolTable::create_table();
-
-  // Create the shared string table using the compact table
-  buffer = (char*)StringTable::init_shared_table(mapinfo, buffer);
-
-  // Create the shared dictionary using the bucket array at this spot in
-  // the misc data space.  Since the shared dictionary table is never
-  // modified, this region (of mapped pages) will be (effectively, if
-  // not explicitly) read-only.
-
   int sharedDictionaryLen = *(intptr_t*)buffer;
   buffer += sizeof(intptr_t);
   int number_of_entries = *(intptr_t*)buffer;
@@ -1129,9 +1134,14 @@
   buffer += sizeof(intptr_t);
   buffer += len;
 
+  // Verify various attributes of the archive, plus initialize the
+  // shared string/symbol tables
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
-  serialize(&rc);
+  serialize(&rc, NULL, NULL);
+
+  // Initialize the run-time symbol table.
+  SymbolTable::create_table();
 
   // Close the mapinfo file
   mapinfo->close();
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
 #define MIN_SHARED_READ_WRITE_SIZE      (NOT_LP64(7*M) LP64_ONLY(12*M))
 
 #define DEFAULT_SHARED_READ_ONLY_SIZE   (NOT_LP64(12*M) LP64_ONLY(16*M))
-#define MIN_SHARED_READ_ONLY_SIZE       (NOT_LP64(8*M) LP64_ONLY(9*M))
+#define MIN_SHARED_READ_ONLY_SIZE       (NOT_LP64(9*M) LP64_ONLY(10*M))
 
 // the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on
 // the sizes required for dumping the archive using the default classlist. The sizes
@@ -193,7 +193,8 @@
                                       void** vtable,
                                       char** md_top, char* md_end,
                                       char** mc_top, char* mc_end);
-  static void serialize(SerializeClosure* sc);
+  static void serialize(SerializeClosure* sc, GrowableArray<MemRegion> *string_space,
+                        size_t* space_size);
 
   static MetaspaceSharedStats* stats() {
     return &_stats;
--- a/hotspot/src/share/vm/memory/virtualspace.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/memory/virtualspace.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -751,6 +751,29 @@
   return low() <= (const char*) p && (const char*) p < high();
 }
 
+static void pretouch_expanded_memory(void* start, void* end) {
+  assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
+  assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
+
+  os::pretouch_memory(start, end);
+}
+
+static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
+  if (os::commit_memory(start, size, alignment, executable)) {
+    if (pre_touch || AlwaysPreTouch) {
+      pretouch_expanded_memory(start, start + size);
+    }
+    return true;
+  }
+
+  debug_only(warning(
+      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
+      " size=" SIZE_FORMAT ", executable=%d) failed",
+      p2i(start), p2i(start + size), size, executable);)
+
+  return false;
+}
+
 /*
    First we need to determine if a particular virtual space is using large
    pages.  This is done at the initialize function and only virtual spaces
@@ -764,7 +787,9 @@
    allocated with default pages.
 */
 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
-  if (uncommitted_size() < bytes) return false;
+  if (uncommitted_size() < bytes) {
+    return false;
+  }
 
   if (special()) {
     // don't commit memory if the entire space is pinned in memory
@@ -774,30 +799,23 @@
 
   char* previous_high = high();
   char* unaligned_new_high = high() + bytes;
-  assert(unaligned_new_high <= high_boundary(),
-         "cannot expand by more than upper boundary");
+  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 
   // Calculate where the new high for each of the regions should be.  If
   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
   // then the unaligned lower and upper new highs would be the
   // lower_high() and upper_high() respectively.
-  char* unaligned_lower_new_high =
-    MIN2(unaligned_new_high, lower_high_boundary());
-  char* unaligned_middle_new_high =
-    MIN2(unaligned_new_high, middle_high_boundary());
-  char* unaligned_upper_new_high =
-    MIN2(unaligned_new_high, upper_high_boundary());
+  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
+  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
+  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 
   // Align the new highs based on the regions alignment.  lower and upper
   // alignment will always be default page size.  middle alignment will be
   // LargePageSizeInBytes if the actual size of the virtual space is in
   // fact larger than LargePageSizeInBytes.
-  char* aligned_lower_new_high =
-    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
-  char* aligned_middle_new_high =
-    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
-  char* aligned_upper_new_high =
-    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+  char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+  char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+  char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 
   // Determine which regions need to grow in this expand_by call.
   // If you are growing in the lower region, high() must be in that
@@ -808,75 +826,48 @@
   // is an intra or inter region growth.
   size_t lower_needs = 0;
   if (aligned_lower_new_high > lower_high()) {
-    lower_needs =
-      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
+    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
   }
   size_t middle_needs = 0;
   if (aligned_middle_new_high > middle_high()) {
-    middle_needs =
-      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
+    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
   }
   size_t upper_needs = 0;
   if (aligned_upper_new_high > upper_high()) {
-    upper_needs =
-      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
+    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
   }
 
   // Check contiguity.
-  assert(low_boundary() <= lower_high() &&
-         lower_high() <= lower_high_boundary(),
+  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
          "high address must be contained within the region");
-  assert(lower_high_boundary() <= middle_high() &&
-         middle_high() <= middle_high_boundary(),
+  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
          "high address must be contained within the region");
-  assert(middle_high_boundary() <= upper_high() &&
-         upper_high() <= upper_high_boundary(),
+  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
          "high address must be contained within the region");
 
   // Commit regions
   if (lower_needs > 0) {
-    assert(low_boundary() <= lower_high() &&
-           lower_high() + lower_needs <= lower_high_boundary(),
-           "must not expand beyond region");
-    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
-      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
-                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
-                         p2i(lower_high()), lower_needs, _executable);)
+    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
+    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
       return false;
-    } else {
-      _lower_high += lower_needs;
     }
+    _lower_high += lower_needs;
   }
+
   if (middle_needs > 0) {
-    assert(lower_high_boundary() <= middle_high() &&
-           middle_high() + middle_needs <= middle_high_boundary(),
-           "must not expand beyond region");
-    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
-                           _executable)) {
-      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
-                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
-                         ", %d) failed", p2i(middle_high()), middle_needs,
-                         middle_alignment(), _executable);)
+    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
+    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
       return false;
     }
     _middle_high += middle_needs;
   }
+
   if (upper_needs > 0) {
-    assert(middle_high_boundary() <= upper_high() &&
-           upper_high() + upper_needs <= upper_high_boundary(),
-           "must not expand beyond region");
-    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
-      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
-                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
-                         p2i(upper_high()), upper_needs, _executable);)
+    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
+    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
       return false;
-    } else {
-      _upper_high += upper_needs;
     }
-  }
-
-  if (pre_touch || AlwaysPreTouch) {
-    os::pretouch_memory(previous_high, unaligned_new_high);
+    _upper_high += upper_needs;
   }
 
   _high += bytes;
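
The expand_by() rewrite above routes every region through the new commit_expanded() helper, which pairs os::commit_memory() with an optional pretouch of the freshly committed pages and bails out on failure. As a rough standalone sketch of that commit-then-pretouch pattern, using plain POSIX calls and hypothetical helper names rather than HotSpot's os:: layer, and assuming 4 KiB pages:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

static const std::size_t kPageSize = 4096;  // assumed page size for this sketch

// Touch one byte per page so the OS backs the range immediately instead of
// on first use -- the role os::pretouch_memory() plays above.
static void pretouch(char* start, char* end) {
  for (char* p = start; p < end; p += kPageSize) {
    *static_cast<volatile char*>(p) = 0;
  }
}

// Map a region and optionally pretouch it, returning false on failure so the
// caller can stop expanding -- the same shape as the commit_expanded() helper.
static bool commit_region(char** out, std::size_t size, bool pre_touch) {
  void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    std::perror("mmap");
    return false;
  }
  *out = static_cast<char*>(p);
  if (pre_touch) {
    pretouch(*out, *out + size);
  }
  return true;
}

int main() {
  char* region = nullptr;
  if (commit_region(&region, 16 * kPageSize, /*pre_touch=*/true)) {
    munmap(region, 16 * kPageSize);
  }
  return 0;
}
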
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Wed Apr 20 11:11:56 2016 +0000
@@ -40,6 +40,7 @@
 # include "memory/resourceArea.hpp"
 # include "utilities/macros.hpp"
 #if INCLUDE_JVMTI
+# include "logging/log.hpp"
 # include "oops/oop.inline.hpp"
 # include "prims/jvmtiEnter.hpp"
 # include "prims/jvmtiRawMonitor.hpp"
@@ -415,7 +416,7 @@
     <xsl:value-of select="$space"/>
     <xsl:text>  if (trace_flags) {</xsl:text>
     <xsl:value-of select="$space"/>
-    <xsl:text>    tty->print_cr("JVMTI [non-attached thread] %s %s",  func_name,</xsl:text>
+    <xsl:text>    log_trace(jvmti)("[non-attached thread] %s %s",  func_name,</xsl:text>
     <xsl:value-of select="$space"/>
     <xsl:text>    JvmtiUtil::error_name(JVMTI_ERROR_UNATTACHED_THREAD));</xsl:text>
     <xsl:value-of select="$space"/>
@@ -452,7 +453,7 @@
 </xsl:text>
     <xsl:if test="$trace='Trace'">
       <xsl:text>    if (trace_flags) {
-          tty->print_cr("JVMTI [%s] %s %s",  curr_thread_name, func_name, 
+          log_trace(jvmti)("[%s] %s %s",  curr_thread_name, func_name, 
                     JvmtiUtil::error_name(JVMTI_ERROR_MUST_POSSESS_CAPABILITY));
     }
 </xsl:text>
@@ -486,7 +487,7 @@
 </xsl:text>
     <xsl:if test="$trace='Trace'">
       <xsl:text>    if (trace_flags) {
-          tty->print_cr("JVMTI [-] %s %s",  func_name, 
+          log_trace(jvmti)("[-] %s %s",  func_name, 
                     JvmtiUtil::error_name(JVMTI_ERROR_WRONG_PHASE));
     }
 </xsl:text>
@@ -509,7 +510,7 @@
 </xsl:text>
     <xsl:if test="$trace='Trace'">
       <xsl:text>    if (trace_flags) {
-          tty->print_cr("JVMTI [-] %s %s",  func_name, 
+          log_trace(jvmti)("[-] %s %s",  func_name, 
                     JvmtiUtil::error_name(JVMTI_ERROR_WRONG_PHASE));
     }
 </xsl:text>
@@ -522,7 +523,7 @@
 </xsl:text>
     <xsl:if test="$trace='Trace'">
       <xsl:text>    if (trace_flags) {
-          tty->print_cr("JVMTI [-] %s %s",  func_name, 
+          log_trace(jvmti)("[-] %s %s",  func_name, 
                     JvmtiUtil::error_name(JVMTI_ERROR_WRONG_PHASE));
     }
 </xsl:text>
@@ -541,7 +542,7 @@
 </xsl:text>
     <xsl:if test="$trace='Trace'">
       <xsl:text>    if (trace_flags) {
-          tty->print_cr("JVMTI [%s] %s %s  env=" PTR_FORMAT,  curr_thread_name, func_name, 
+          log_trace(jvmti)("[%s] %s %s  env=" PTR_FORMAT,  curr_thread_name, func_name, 
                     JvmtiUtil::error_name(JVMTI_ERROR_INVALID_ENVIRONMENT), p2i(env));
     }
 </xsl:text>
@@ -667,7 +668,7 @@
     <xsl:with-param name="endParam" select="."/>
   </xsl:apply-templates>
   <xsl:text>      }
-        tty->print_cr("JVMTI [%s] %s } %s - erroneous arg is </xsl:text>
+        log_error(jvmti)("[%s] %s } %s - erroneous arg is </xsl:text>
     <xsl:value-of select="@id"/>
     <xsl:value-of select="$comment"/>
     <xsl:text>",  curr_thread_name, func_name, 
@@ -692,10 +693,10 @@
 </xsl:text>
     <xsl:apply-templates select="." mode="traceIn"/>
     <xsl:text>    }
-    tty->print_cr("JVMTI [%s] %s } %s",  curr_thread_name, func_name, 
+    log_error(jvmti)("[%s] %s } %s",  curr_thread_name, func_name, 
                   JvmtiUtil::error_name(err));
   } else if ((trace_flags &amp; JvmtiTrace::SHOW_OUT) != 0) {
-    tty->print_cr("JVMTI [%s] %s }",  curr_thread_name, func_name);
+    log_trace(jvmti)("[%s] %s }",  curr_thread_name, func_name);
   }
 </xsl:text>
   </xsl:if>
@@ -703,7 +704,7 @@
 
 <xsl:template match="function" mode="traceIn">
   <xsl:param name="endParam"></xsl:param>
-  <xsl:text>          tty->print_cr("JVMTI [%s] %s { </xsl:text>
+  <xsl:text>          log_trace(jvmti)("[%s] %s { </xsl:text>
   <xsl:apply-templates select="parameters" mode="traceInFormat">
     <xsl:with-param name="endParam" select="$endParam"/>    
   </xsl:apply-templates>
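
The stylesheet change above swaps the hand-rolled "JVMTI ..." prefix on tty->print_cr for log_trace(jvmti)(...), so the tag decoration comes from the logging framework rather than being baked into each message. A small mock of the call shape the generated entry points now use -- hypothetical names, not the real macro from logging/log.hpp:

#include <cstdarg>
#include <cstdio>

// Stand-in for the unified-logging tag/level check that log_trace(jvmti)
// performs before formatting anything.
static bool jvmti_trace_enabled = true;

static void jvmti_log_trace(const char* fmt, ...) {
  if (!jvmti_trace_enabled) {
    return;
  }
  va_list ap;
  va_start(ap, fmt);
  std::fprintf(stderr, "[trace][jvmti] ");   // decoration supplied by the framework
  std::vfprintf(stderr, fmt, ap);
  std::fputc('\n', stderr);
  va_end(ap);
}

int main() {
  // Shape of a line the stylesheet now emits into the generated code.
  jvmti_log_trace("[%s] %s %s", "main", "GetThreadState", "JVMTI_ERROR_NONE");
  return 0;
}
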
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -1675,7 +1675,7 @@
   HandleMark hm(thread);
   KlassHandle kh (thread, k_oop);
 
-  TraceTime t("FollowReferences", TraceJVMTIObjectTagging);
+  TraceTime t("FollowReferences", TRACETIME_LOG(Debug, jvmti, objecttagging));
   JvmtiTagMap::tag_map_for(this)->follow_references(heap_filter, kh, initial_object, callbacks, user_data);
   return JVMTI_ERROR_NONE;
 } /* end FollowReferences */
@@ -1706,7 +1706,7 @@
   HandleMark hm(thread);
   KlassHandle kh (thread, k_oop);
 
-  TraceTime t("IterateThroughHeap", TraceJVMTIObjectTagging);
+  TraceTime t("IterateThroughHeap", TRACETIME_LOG(Debug, jvmti, objecttagging));
   JvmtiTagMap::tag_map_for(this)->iterate_through_heap(heap_filter, kh, callbacks, user_data);
   return JVMTI_ERROR_NONE;
 } /* end IterateThroughHeap */
@@ -1738,7 +1738,7 @@
 // tag_result_ptr - NULL is a valid value, must be checked
 jvmtiError
 JvmtiEnv::GetObjectsWithTags(jint tag_count, const jlong* tags, jint* count_ptr, jobject** object_result_ptr, jlong** tag_result_ptr) {
-  TraceTime t("GetObjectsWithTags", TraceJVMTIObjectTagging);
+  TraceTime t("GetObjectsWithTags", TRACETIME_LOG(Debug, jvmti, objecttagging));
   return JvmtiTagMap::tag_map_for(this)->get_objects_with_tags((jlong*)tags, tag_count, count_ptr, object_result_ptr, tag_result_ptr);
 } /* end GetObjectsWithTags */
 
@@ -1771,7 +1771,7 @@
 // user_data - NULL is a valid value, must be checked
 jvmtiError
 JvmtiEnv::IterateOverReachableObjects(jvmtiHeapRootCallback heap_root_callback, jvmtiStackReferenceCallback stack_ref_callback, jvmtiObjectReferenceCallback object_ref_callback, const void* user_data) {
-  TraceTime t("IterateOverReachableObjects", TraceJVMTIObjectTagging);
+  TraceTime t("IterateOverReachableObjects", TRACETIME_LOG(Debug, jvmti, objecttagging));
   JvmtiTagMap::tag_map_for(this)->iterate_over_reachable_objects(heap_root_callback, stack_ref_callback, object_ref_callback, user_data);
   return JVMTI_ERROR_NONE;
 } /* end IterateOverReachableObjects */
@@ -1781,7 +1781,7 @@
 // user_data - NULL is a valid value, must be checked
 jvmtiError
 JvmtiEnv::IterateOverHeap(jvmtiHeapObjectFilter object_filter, jvmtiHeapObjectCallback heap_object_callback, const void* user_data) {
-  TraceTime t("IterateOverHeap", TraceJVMTIObjectTagging);
+  TraceTime t("IterateOverHeap", TRACETIME_LOG(Debug, jvmti, objecttagging));
   Thread *thread = Thread::current();
   HandleMark hm(thread);
   JvmtiTagMap::tag_map_for(this)->iterate_over_heap(object_filter, KlassHandle(), heap_object_callback, user_data);
@@ -1805,7 +1805,7 @@
   Thread *thread = Thread::current();
   HandleMark hm(thread);
   KlassHandle klass (thread, k_oop);
-  TraceTime t("IterateOverInstancesOfClass", TraceJVMTIObjectTagging);
+  TraceTime t("IterateOverInstancesOfClass", TRACETIME_LOG(Debug, jvmti, objecttagging));
   JvmtiTagMap::tag_map_for(this)->iterate_over_heap(object_filter, klass, heap_object_callback, user_data);
   return JVMTI_ERROR_NONE;
 } /* end IterateOverInstancesOfClass */
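
With TRACETIME_LOG(Debug, jvmti, objecttagging), each TraceTime above reports to the jvmti+objecttagging log channel instead of being gated by a global develop flag. A standalone analogue of the scoped-timer idea, with hypothetical names and not HotSpot's actual TraceTime class:

#include <chrono>
#include <cstdio>

class ScopedTraceTime {
 public:
  ScopedTraceTime(const char* title, bool enabled)
      : _title(title), _enabled(enabled),
        _start(std::chrono::steady_clock::now()) {}
  // Report elapsed time when the scope ends, only if the channel is enabled.
  ~ScopedTraceTime() {
    if (!_enabled) return;
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - _start).count();
    std::printf("[debug][jvmti,objecttagging] %s, %.3f ms\n", _title, ms);
  }
 private:
  const char* _title;
  bool _enabled;
  std::chrono::steady_clock::time_point _start;
};

int main() {
  ScopedTraceTime t("FollowReferences", /*enabled=*/true);
  // ... work being timed ...
  return 0;
}
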
--- a/hotspot/src/share/vm/prims/jvmtiEventController.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEventController.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEventController.hpp"
 #include "prims/jvmtiEventController.inline.hpp"
@@ -42,7 +43,7 @@
 #define EC_TRACE(out) do { \
   if (JvmtiTrace::trace_event_controller()) { \
     SafeResourceMark rm; \
-    tty->print_cr out; \
+    log_trace(jvmti) out; \
   } \
 } while (0)
 #else
@@ -344,7 +345,7 @@
 
 
 void JvmtiEventControllerPrivate::enter_interp_only_mode(JvmtiThreadState *state) {
-  EC_TRACE(("JVMTI [%s] # Entering interpreter only mode",
+  EC_TRACE(("[%s] # Entering interpreter only mode",
             JvmtiTrace::safe_get_thread_name(state->get_thread())));
 
   VM_EnterInterpOnlyMode op(state);
@@ -354,7 +355,7 @@
 
 void
 JvmtiEventControllerPrivate::leave_interp_only_mode(JvmtiThreadState *state) {
-  EC_TRACE(("JVMTI [%s] # Leaving interpreter only mode",
+  EC_TRACE(("[%s] # Leaving interpreter only mode",
             JvmtiTrace::safe_get_thread_name(state->get_thread())));
   state->leave_interp_only_mode();
 }
@@ -370,7 +371,7 @@
       jlong bit = JvmtiEventEnabled::bit_for((jvmtiEvent)ei);
       if (changed & bit) {
         // it changed, print it
-        tty->print_cr("JVMTI [%s] # %s event %s",
+        log_trace(jvmti)("[%s] # %s event %s",
                       JvmtiTrace::safe_get_thread_name(state->get_thread()),
                       (now_enabled & bit)? "Enabling" : "Disabling", JvmtiTrace::event_name((jvmtiEvent)ei));
       }
@@ -390,7 +391,7 @@
       jlong bit = JvmtiEventEnabled::bit_for((jvmtiEvent)ei);
       if (changed & bit) {
         // it changed, print it
-        tty->print_cr("JVMTI [-] # %s event %s",
+        log_trace(jvmti)("[-] # %s event %s",
                       (now_enabled & bit)? "Enabling" : "Disabling", JvmtiTrace::event_name((jvmtiEvent)ei));
       }
     }
@@ -563,7 +564,7 @@
   jlong was_any_env_thread_enabled = JvmtiEventController::_universal_global_event_enabled.get_bits();
   jlong any_env_thread_enabled = 0;
 
-  EC_TRACE(("JVMTI [-] # recompute enabled - before " UINT64_FORMAT_X, was_any_env_thread_enabled));
+  EC_TRACE(("[-] # recompute enabled - before " UINT64_FORMAT_X, was_any_env_thread_enabled));
 
   // compute non-thread-filters events.
   // This must be done separately from thread-filtered events, since some
@@ -643,7 +644,7 @@
 
   }
 
-  EC_TRACE(("JVMTI [-] # recompute enabled - after " UINT64_FORMAT_X, any_env_thread_enabled));
+  EC_TRACE(("[-] # recompute enabled - after " UINT64_FORMAT_X, any_env_thread_enabled));
 }
 
 
@@ -653,7 +654,7 @@
   assert(thread == Thread::current(), "must be current thread");
   assert(JvmtiEnvBase::environments_might_exist(), "to enter event controller, JVM TI environments must exist");
 
-  EC_TRACE(("JVMTI [%s] # thread started", JvmtiTrace::safe_get_thread_name(thread)));
+  EC_TRACE(("[%s] # thread started", JvmtiTrace::safe_get_thread_name(thread)));
 
   // if we have any thread filtered events globally enabled, create/update the thread state
   if ((JvmtiEventController::_universal_global_event_enabled.get_bits() & THREAD_FILTERED_EVENT_BITS) != 0) {
@@ -673,7 +674,7 @@
   // May be called after all environments have been disposed.
   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
 
-  EC_TRACE(("JVMTI [%s] # thread ended", JvmtiTrace::safe_get_thread_name(thread)));
+  EC_TRACE(("[%s] # thread ended", JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiThreadState *state = thread->jvmti_thread_state();
   assert(state != NULL, "else why are we here?");
@@ -684,7 +685,7 @@
                                                       const jvmtiEventCallbacks* callbacks,
                                                       jint size_of_callbacks) {
   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
-  EC_TRACE(("JVMTI [*] # set event callbacks"));
+  EC_TRACE(("[*] # set event callbacks"));
 
   env->set_event_callbacks(callbacks, size_of_callbacks);
   jlong enabled_bits = 0;
@@ -704,7 +705,7 @@
                                                           jvmtiExtensionEvent callback)
 {
   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
-  EC_TRACE(("JVMTI [*] # set extension event callback"));
+  EC_TRACE(("[*] # set extension event callback"));
 
   // extension events are allocated below JVMTI_MIN_EVENT_TYPE_VAL
   assert(extension_event_index >= (jint)EXT_MIN_EVENT_TYPE_VAL &&
@@ -750,7 +751,7 @@
 void
 JvmtiEventControllerPrivate::env_initialize(JvmtiEnvBase *env) {
   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
-  EC_TRACE(("JVMTI [*] # env initialize"));
+  EC_TRACE(("[*] # env initialize"));
 
   if (JvmtiEnvBase::is_vm_live()) {
     // if we didn't initialize event info already (this is a late
@@ -772,7 +773,7 @@
 void
 JvmtiEventControllerPrivate::env_dispose(JvmtiEnvBase *env) {
   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
-  EC_TRACE(("JVMTI [*] # env dispose"));
+  EC_TRACE(("[*] # env dispose"));
 
   // Before the environment is marked disposed, disable all events on this
   // environment (by zapping the callbacks).  As a result, the disposed
@@ -794,7 +795,7 @@
                                           jvmtiEvent event_type, bool enabled) {
   assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
 
-  EC_TRACE(("JVMTI [%s] # user %s event %s",
+  EC_TRACE(("[%s] # user %s event %s",
             thread==NULL? "ALL": JvmtiTrace::safe_get_thread_name(thread),
             enabled? "enabled" : "disabled", JvmtiTrace::event_name(event_type)));
 
@@ -813,7 +814,7 @@
 
 void
 JvmtiEventControllerPrivate::set_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
-  EC_TRACE(("JVMTI [%s] # set frame pop - frame=%d",
+  EC_TRACE(("[%s] # set frame pop - frame=%d",
             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
             fpop.frame_number() ));
 
@@ -824,7 +825,7 @@
 
 void
 JvmtiEventControllerPrivate::clear_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
-  EC_TRACE(("JVMTI [%s] # clear frame pop - frame=%d",
+  EC_TRACE(("[%s] # clear frame pop - frame=%d",
             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
             fpop.frame_number() ));
 
@@ -837,7 +838,7 @@
 JvmtiEventControllerPrivate::clear_to_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
   int cleared_cnt = ets->get_frame_pops()->clear_to(fpop);
 
-  EC_TRACE(("JVMTI [%s] # clear to frame pop - frame=%d, count=%d",
+  EC_TRACE(("[%s] # clear to frame pop - frame=%d, count=%d",
             JvmtiTrace::safe_get_thread_name(ets->get_thread()),
             fpop.frame_number(),
             cleared_cnt ));
@@ -863,7 +864,7 @@
     return;
   }
 
-  EC_TRACE(("JVMTI [-] # change field watch - %s %s count=%d",
+  EC_TRACE(("[-] # change field watch - %s %s count=%d",
             event_type==JVMTI_EVENT_FIELD_MODIFICATION? "modification" : "access",
             added? "add" : "remove",
             *count_addr));
@@ -893,7 +894,7 @@
     return;
   }
 
-  EC_TRACE(("JVMTI [-] # VM live"));
+  EC_TRACE(("[-] # VM live"));
 
 #ifdef ASSERT
   // check that our idea and the spec's idea of threaded events match
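
EC_TRACE keeps its double-parenthesis calling convention: the caller supplies the entire argument list in an extra pair of parentheses, and the macro pastes it onto whatever printf-style sink is configured, which is now log_trace(jvmti). A minimal sketch of that macro trick, assuming a plain printf backend instead of HotSpot's logging macros:

#include <cstdio>

static bool trace_event_controller = true;  // stand-in for JvmtiTrace::trace_event_controller()

// The caller writes EC_TRACE(("fmt", args...)); the inner parentheses travel
// with the argument, so 'out' expands into a complete call argument list.
#define EC_TRACE(out) do {              \
    if (trace_event_controller) {       \
      std::printf out;                  \
      std::printf("\n");                \
    }                                   \
  } while (0)

int main() {
  EC_TRACE(("[%s] # thread started", "main"));
  return 0;
}
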
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -29,6 +29,8 @@
 #include "code/scopeDesc.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
@@ -60,8 +62,8 @@
 #endif // INCLUDE_ALL_GCS
 
 #ifdef JVMTI_TRACE
-#define EVT_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_SENT) != 0) { SafeResourceMark rm; tty->print_cr out; }
-#define EVT_TRIG_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_TRIGGER) != 0) { SafeResourceMark rm; tty->print_cr out; }
+#define EVT_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_SENT) != 0) { SafeResourceMark rm; log_trace(jvmti) out; }
+#define EVT_TRIG_TRACE(evt,out) if ((JvmtiTrace::event_trace_flags(evt) & JvmtiTrace::SHOW_EVENT_TRIGGER) != 0) { SafeResourceMark rm; log_trace(jvmti) out; }
 #else
 #define EVT_TRIG_TRACE(evt,out)
 #define EVT_TRACE(evt,out)
@@ -423,7 +425,10 @@
                          THREAD);
 
   if (HAS_PENDING_EXCEPTION) {
-    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    LogTarget(Trace, jvmti) log;
+    LogStreamCHeap log_stream(log);
+    java_lang_Throwable::print(PENDING_EXCEPTION, &log_stream);
+    log_stream.cr();
     CLEAR_PENDING_EXCEPTION;
     return;
   }
@@ -465,7 +470,7 @@
 //
 
 void JvmtiExport::post_early_vm_start() {
-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Trg Early VM start event triggered" ));
+  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("Trg Early VM start event triggered" ));
 
   // can now enable some events
   JvmtiEventController::vm_start();
@@ -474,7 +479,7 @@
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     // Only early vmstart envs post early VMStart event
     if (env->early_vmstart_env() && env->is_enabled(JVMTI_EVENT_VM_START)) {
-      EVT_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Evt Early VM start event sent" ));
+      EVT_TRACE(JVMTI_EVENT_VM_START, ("Evt Early VM start event sent" ));
       JavaThread *thread  = JavaThread::current();
       JvmtiThreadEventMark jem(thread);
       JvmtiJavaThreadEventTransition jet(thread);
@@ -487,7 +492,7 @@
 }
 
 void JvmtiExport::post_vm_start() {
-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Trg VM start event triggered" ));
+  EVT_TRIG_TRACE(JVMTI_EVENT_VM_START, ("Trg VM start event triggered" ));
 
   // can now enable some events
   JvmtiEventController::vm_start();
@@ -496,7 +501,7 @@
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     // Early vmstart envs do not post normal VMStart event
     if (!env->early_vmstart_env() && env->is_enabled(JVMTI_EVENT_VM_START)) {
-      EVT_TRACE(JVMTI_EVENT_VM_START, ("JVMTI Evt VM start event sent" ));
+      EVT_TRACE(JVMTI_EVENT_VM_START, ("Evt VM start event sent" ));
 
       JavaThread *thread  = JavaThread::current();
       JvmtiThreadEventMark jem(thread);
@@ -511,7 +516,7 @@
 
 
 void JvmtiExport::post_vm_initialized() {
-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Trg VM init event triggered" ));
+  EVT_TRIG_TRACE(JVMTI_EVENT_VM_INIT, ("Trg VM init event triggered" ));
 
   // can now enable events
   JvmtiEventController::vm_init();
@@ -519,7 +524,7 @@
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_VM_INIT)) {
-      EVT_TRACE(JVMTI_EVENT_VM_INIT, ("JVMTI Evt VM init event sent" ));
+      EVT_TRACE(JVMTI_EVENT_VM_INIT, ("Evt VM init event sent" ));
 
       JavaThread *thread  = JavaThread::current();
       JvmtiThreadEventMark jem(thread);
@@ -534,12 +539,12 @@
 
 
 void JvmtiExport::post_vm_death() {
-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Trg VM death event triggered" ));
+  EVT_TRIG_TRACE(JVMTI_EVENT_VM_DEATH, ("Trg VM death event triggered" ));
 
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_VM_DEATH)) {
-      EVT_TRACE(JVMTI_EVENT_VM_DEATH, ("JVMTI Evt VM death event sent" ));
+      EVT_TRACE(JVMTI_EVENT_VM_DEATH, ("Evt VM death event sent" ));
 
       JavaThread *thread  = JavaThread::current();
       JvmtiEventMark jem(thread);
@@ -632,9 +637,6 @@
   }
 
   void post() {
-//    EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
-//                   ("JVMTI [%s] class file load hook event triggered",
-//                    JvmtiTrace::safe_get_thread_name(_thread)));
     post_all_envs();
     copy_modified_data();
   }
@@ -670,11 +672,6 @@
     }
     unsigned char *new_data = NULL;
     jint new_len = 0;
-//    EVT_TRACE(JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
-//     ("JVMTI [%s] class file load hook event sent %s  data_ptr = %d, data_len = %d",
-//               JvmtiTrace::safe_get_thread_name(_thread),
-//               _h_name == NULL ? "NULL" : _h_name->as_utf8(),
-//               _curr_data, _curr_len ));
     JvmtiClassFileLoadEventMark jem(_thread, _h_name, _class_loader,
                                     _h_protection_domain,
                                     _h_class_being_redefined);
@@ -840,7 +837,7 @@
   }
   JavaThread* thread = JavaThread::current();
   EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
-                 ("JVMTI [%s] method compile unload event triggered",
+                 ("[%s] method compile unload event triggered",
                   JvmtiTrace::safe_get_thread_name(thread)));
 
   // post the event for each environment that has this event enabled.
@@ -851,7 +848,7 @@
         continue;
       }
       EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
-                ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
+                ("[%s] class compile method unload event sent jmethodID " PTR_FORMAT,
                  JvmtiTrace::safe_get_thread_name(thread), p2i(method)));
 
       ResourceMark rm(thread);
@@ -879,7 +876,7 @@
   if (state == NULL) {
     return;
   }
-  EVT_TRIG_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Trg Breakpoint triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_BREAKPOINT, ("[%s] Trg Breakpoint triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
@@ -887,7 +884,7 @@
     if (!ets->breakpoint_posted() && ets->is_enabled(JVMTI_EVENT_BREAKPOINT)) {
       ThreadState old_os_state = thread->osthread()->get_state();
       thread->osthread()->set_state(BREAKPOINTED);
-      EVT_TRACE(JVMTI_EVENT_BREAKPOINT, ("JVMTI [%s] Evt Breakpoint sent %s.%s @ " INTX_FORMAT,
+      EVT_TRACE(JVMTI_EVENT_BREAKPOINT, ("[%s] Evt Breakpoint sent %s.%s @ " INTX_FORMAT,
                      JvmtiTrace::safe_get_thread_name(thread),
                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -965,7 +962,7 @@
   if (state == NULL) {
     return;
   }
-  EVT_TRIG_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Trg Single Step triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_SINGLE_STEP, ("[%s] Trg Single Step triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   if (!state->hide_single_stepping()) {
     if (state->is_pending_step_for_popframe()) {
@@ -1004,7 +1001,7 @@
   HandleMark hm(thread);
   KlassHandle kh(thread, klass);
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Trg Class Load triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_LOAD, ("[%s] Trg Class Load triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiThreadState* state = thread->jvmti_thread_state();
   if (state == NULL) {
@@ -1017,7 +1014,7 @@
       if (env->phase() == JVMTI_PHASE_PRIMORDIAL) {
         continue;
       }
-      EVT_TRACE(JVMTI_EVENT_CLASS_LOAD, ("JVMTI [%s] Evt Class Load sent %s",
+      EVT_TRACE(JVMTI_EVENT_CLASS_LOAD, ("[%s] Evt Class Load sent %s",
                                          JvmtiTrace::safe_get_thread_name(thread),
                                          kh()==NULL? "NULL" : kh()->external_name() ));
       JvmtiClassEventMark jem(thread, kh());
@@ -1038,7 +1035,7 @@
   HandleMark hm(thread);
   KlassHandle kh(thread, klass);
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Trg Class Prepare triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("[%s] Trg Class Prepare triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiThreadState* state = thread->jvmti_thread_state();
   if (state == NULL) {
@@ -1051,7 +1048,7 @@
       if (env->phase() == JVMTI_PHASE_PRIMORDIAL) {
         continue;
       }
-      EVT_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("JVMTI [%s] Evt Class Prepare sent %s",
+      EVT_TRACE(JVMTI_EVENT_CLASS_PREPARE, ("[%s] Evt Class Prepare sent %s",
                                             JvmtiTrace::safe_get_thread_name(thread),
                                             kh()==NULL? "NULL" : kh()->external_name() ));
       JvmtiClassEventMark jem(thread, kh());
@@ -1072,7 +1069,7 @@
   HandleMark hm(thread);
   KlassHandle kh(thread, klass);
 
-  EVT_TRIG_TRACE(EXT_EVENT_CLASS_UNLOAD, ("JVMTI [?] Trg Class Unload triggered" ));
+  EVT_TRIG_TRACE(EXT_EVENT_CLASS_UNLOAD, ("[?] Trg Class Unload triggered" ));
   if (JvmtiEventController::is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) {
     assert(thread->is_VM_thread(), "wrong thread");
 
@@ -1086,7 +1083,7 @@
         continue;
       }
       if (env->is_enabled((jvmtiEvent)EXT_EVENT_CLASS_UNLOAD)) {
-        EVT_TRACE(EXT_EVENT_CLASS_UNLOAD, ("JVMTI [?] Evt Class Unload sent %s",
+        EVT_TRACE(EXT_EVENT_CLASS_UNLOAD, ("[?] Evt Class Unload sent %s",
                   kh()==NULL? "NULL" : kh()->external_name() ));
 
         // do everything manually, since this is a proxy - needs special care
@@ -1125,7 +1122,7 @@
   }
   assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Trg Thread Start event triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_START, ("[%s] Trg Thread Start event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   // do JVMTI thread initialization (if needed)
@@ -1140,7 +1137,7 @@
         continue;
       }
       if (env->is_enabled(JVMTI_EVENT_THREAD_START)) {
-        EVT_TRACE(JVMTI_EVENT_THREAD_START, ("JVMTI [%s] Evt Thread Start event sent",
+        EVT_TRACE(JVMTI_EVENT_THREAD_START, ("[%s] Evt Thread Start event sent",
                      JvmtiTrace::safe_get_thread_name(thread) ));
 
         JvmtiThreadEventMark jem(thread);
@@ -1159,7 +1156,7 @@
   if (JvmtiEnv::get_phase() < JVMTI_PHASE_PRIMORDIAL) {
     return;
   }
-  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Trg Thread End event triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_END, ("[%s] Trg Thread End event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiThreadState *state = thread->jvmti_thread_state();
@@ -1178,7 +1175,7 @@
         if (env->phase() == JVMTI_PHASE_PRIMORDIAL) {
           continue;
         }
-        EVT_TRACE(JVMTI_EVENT_THREAD_END, ("JVMTI [%s] Evt Thread End event sent",
+        EVT_TRACE(JVMTI_EVENT_THREAD_END, ("[%s] Evt Thread End event sent",
                      JvmtiTrace::safe_get_thread_name(thread) ));
 
         JvmtiThreadEventMark jem(thread);
@@ -1196,8 +1193,8 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at safepoint");
   assert(env->is_enabled(JVMTI_EVENT_OBJECT_FREE), "checking");
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_OBJECT_FREE, ("JVMTI [?] Trg Object Free triggered" ));
-  EVT_TRACE(JVMTI_EVENT_OBJECT_FREE, ("JVMTI [?] Evt Object Free sent"));
+  EVT_TRIG_TRACE(JVMTI_EVENT_OBJECT_FREE, ("[?] Trg Object Free triggered" ));
+  EVT_TRACE(JVMTI_EVENT_OBJECT_FREE, ("[?] Evt Object Free sent"));
 
   jvmtiEventObjectFree callback = env->callbacks()->ObjectFree;
   if (callback != NULL) {
@@ -1206,12 +1203,12 @@
 }
 
 void JvmtiExport::post_resource_exhausted(jint resource_exhausted_flags, const char* description) {
-  EVT_TRIG_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("JVMTI Trg resource exhausted event triggered" ));
+  EVT_TRIG_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("Trg resource exhausted event triggered" ));
 
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_RESOURCE_EXHAUSTED)) {
-      EVT_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("JVMTI Evt resource exhausted event sent" ));
+      EVT_TRACE(JVMTI_EVENT_RESOURCE_EXHAUSTED, ("Evt resource exhausted event sent" ));
 
       JavaThread *thread  = JavaThread::current();
       JvmtiThreadEventMark jem(thread);
@@ -1229,7 +1226,7 @@
   HandleMark hm(thread);
   methodHandle mh(thread, method);
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Trg Method Entry triggered %s.%s",
+  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("[%s] Trg Method Entry triggered %s.%s",
                      JvmtiTrace::safe_get_thread_name(thread),
                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
@@ -1246,7 +1243,7 @@
     JvmtiEnvThreadStateIterator it(state);
     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
       if (ets->is_enabled(JVMTI_EVENT_METHOD_ENTRY)) {
-        EVT_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("JVMTI [%s] Evt Method Entry sent %s.%s",
+        EVT_TRACE(JVMTI_EVENT_METHOD_ENTRY, ("[%s] Evt Method Entry sent %s.%s",
                                              JvmtiTrace::safe_get_thread_name(thread),
                                              (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                                              (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
@@ -1267,7 +1264,7 @@
   HandleMark hm(thread);
   methodHandle mh(thread, method);
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Trg Method Exit triggered %s.%s",
+  EVT_TRIG_TRACE(JVMTI_EVENT_METHOD_EXIT, ("[%s] Trg Method Exit triggered %s.%s",
                      JvmtiTrace::safe_get_thread_name(thread),
                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
@@ -1303,7 +1300,7 @@
     JvmtiEnvThreadStateIterator it(state);
     for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
       if (ets->is_enabled(JVMTI_EVENT_METHOD_EXIT)) {
-        EVT_TRACE(JVMTI_EVENT_METHOD_EXIT, ("JVMTI [%s] Evt Method Exit sent %s.%s",
+        EVT_TRACE(JVMTI_EVENT_METHOD_EXIT, ("[%s] Evt Method Exit sent %s.%s",
                                             JvmtiTrace::safe_get_thread_name(thread),
                                             (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                                             (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
@@ -1332,7 +1329,7 @@
         // we have a NotifyFramePop entry for this frame.
         // now check that this env/thread wants this event
         if (ets->is_enabled(JVMTI_EVENT_FRAME_POP)) {
-          EVT_TRACE(JVMTI_EVENT_FRAME_POP, ("JVMTI [%s] Evt Frame Pop sent %s.%s",
+          EVT_TRACE(JVMTI_EVENT_FRAME_POP, ("[%s] Evt Frame Pop sent %s.%s",
                                             JvmtiTrace::safe_get_thread_name(thread),
                                             (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                                             (mh() == NULL) ? "NULL" : mh()->name()->as_C_string() ));
@@ -1370,7 +1367,7 @@
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     ets->compare_and_set_current_location(mh(), location, JVMTI_EVENT_SINGLE_STEP);
     if (!ets->single_stepping_posted() && ets->is_enabled(JVMTI_EVENT_SINGLE_STEP)) {
-      EVT_TRACE(JVMTI_EVENT_SINGLE_STEP, ("JVMTI [%s] Evt Single Step sent %s.%s @ " INTX_FORMAT,
+      EVT_TRACE(JVMTI_EVENT_SINGLE_STEP, ("[%s] Evt Single Step sent %s.%s @ " INTX_FORMAT,
                     JvmtiTrace::safe_get_thread_name(thread),
                     (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                     (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1401,7 +1398,7 @@
     return;
   }
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION, ("JVMTI [%s] Trg Exception thrown triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION, ("[%s] Trg Exception thrown triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   if (!state->is_exception_detected()) {
     state->set_exception_detected();
@@ -1410,7 +1407,7 @@
       if (ets->is_enabled(JVMTI_EVENT_EXCEPTION) && (exception != NULL)) {
 
         EVT_TRACE(JVMTI_EVENT_EXCEPTION,
-                     ("JVMTI [%s] Evt Exception thrown sent %s.%s @ " INTX_FORMAT,
+                     ("[%s] Evt Exception thrown sent %s.%s @ " INTX_FORMAT,
                       JvmtiTrace::safe_get_thread_name(thread),
                       (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                       (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1486,7 +1483,7 @@
     return;
   }
   EVT_TRIG_TRACE(JVMTI_EVENT_EXCEPTION_CATCH,
-                    ("JVMTI [%s] Trg unwind_due_to_exception triggered %s.%s @ %s" INTX_FORMAT " - %s",
+                    ("[%s] Trg unwind_due_to_exception triggered %s.%s @ %s" INTX_FORMAT " - %s",
                      JvmtiTrace::safe_get_thread_name(thread),
                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1521,7 +1518,7 @@
       for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
         if (ets->is_enabled(JVMTI_EVENT_EXCEPTION_CATCH) && (exception_handle() != NULL)) {
           EVT_TRACE(JVMTI_EVENT_EXCEPTION_CATCH,
-                     ("JVMTI [%s] Evt ExceptionCatch sent %s.%s @ " INTX_FORMAT,
+                     ("[%s] Evt ExceptionCatch sent %s.%s @ " INTX_FORMAT,
                       JvmtiTrace::safe_get_thread_name(thread),
                       (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                       (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1610,12 +1607,12 @@
   if (state == NULL) {
     return;
   }
-  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Trg Field Access event triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("[%s] Trg Field Access event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_FIELD_ACCESS)) {
-      EVT_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("JVMTI [%s] Evt Field Access event sent %s.%s @ " INTX_FORMAT,
+      EVT_TRACE(JVMTI_EVENT_FIELD_ACCESS, ("[%s] Evt Field Access event sent %s.%s @ " INTX_FORMAT,
                      JvmtiTrace::safe_get_thread_name(thread),
                      (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                      (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1772,14 +1769,14 @@
     return;
   }
   EVT_TRIG_TRACE(JVMTI_EVENT_FIELD_MODIFICATION,
-                     ("JVMTI [%s] Trg Field Modification event triggered",
+                     ("[%s] Trg Field Modification event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_FIELD_MODIFICATION)) {
       EVT_TRACE(JVMTI_EVENT_FIELD_MODIFICATION,
-                   ("JVMTI [%s] Evt Field Modification event sent %s.%s @ " INTX_FORMAT,
+                   ("[%s] Evt Field Modification event sent %s.%s @ " INTX_FORMAT,
                     JvmtiTrace::safe_get_thread_name(thread),
                     (mh() == NULL) ? "NULL" : mh()->klass_name()->as_C_string(),
                     (mh() == NULL) ? "NULL" : mh()->name()->as_C_string(),
@@ -1807,14 +1804,14 @@
   HandleMark hm(thread);
   methodHandle mh(thread, method);
 
-  EVT_TRIG_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Trg Native Method Bind event triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("[%s] Trg Native Method Bind event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   if (JvmtiEventController::is_enabled(JVMTI_EVENT_NATIVE_METHOD_BIND)) {
     JvmtiEnvIterator it;
     for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
       if (env->is_enabled(JVMTI_EVENT_NATIVE_METHOD_BIND)) {
-        EVT_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("JVMTI [%s] Evt Native Method Bind event sent",
+        EVT_TRACE(JVMTI_EVENT_NATIVE_METHOD_BIND, ("[%s] Evt Native Method Bind event sent",
                      JvmtiTrace::safe_get_thread_name(thread) ));
 
         JvmtiMethodEventMark jem(thread, mh);
@@ -1878,7 +1875,7 @@
   JavaThread* thread = JavaThread::current();
 
   EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
-                 ("JVMTI [%s] method compile load event triggered",
+                 ("[%s] method compile load event triggered",
                  JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvIterator it;
@@ -1888,7 +1885,7 @@
         continue;
       }
       EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
-                ("JVMTI [%s] class compile method load event sent %s.%s  ",
+                ("[%s] class compile method load event sent %s.%s  ",
                 JvmtiTrace::safe_get_thread_name(thread),
                 (nm->method() == NULL) ? "NULL" : nm->method()->klass_name()->as_C_string(),
                 (nm->method() == NULL) ? "NULL" : nm->method()->name()->as_C_string()));
@@ -1921,12 +1918,12 @@
   }
   JavaThread* thread = JavaThread::current();
   EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
-                 ("JVMTI [%s] method compile load event triggered (by GenerateEvents)",
+                 ("[%s] method compile load event triggered (by GenerateEvents)",
                  JvmtiTrace::safe_get_thread_name(thread)));
   if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_LOAD)) {
 
     EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
-              ("JVMTI [%s] class compile method load event sent (by GenerateEvents), jmethodID=" PTR_FORMAT,
+              ("[%s] class compile method load event sent (by GenerateEvents), jmethodID=" PTR_FORMAT,
                JvmtiTrace::safe_get_thread_name(thread), p2i(method)));
 
     JvmtiEventMark jem(thread);
@@ -1949,13 +1946,13 @@
   ThreadInVMfromUnknown __tiv;
 
   EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
-                 ("JVMTI [%s] method dynamic code generated event triggered",
+                 ("[%s] method dynamic code generated event triggered",
                  JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_DYNAMIC_CODE_GENERATED)) {
       EVT_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
-                ("JVMTI [%s] dynamic code generated event sent for %s",
+                ("[%s] dynamic code generated event sent for %s",
                 JvmtiTrace::safe_get_thread_name(thread), name));
       JvmtiEventMark jem(thread);
       JvmtiJavaThreadEventTransition jet(thread);
@@ -1991,11 +1988,11 @@
 {
   JavaThread* thread = JavaThread::current();
   EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
-                 ("JVMTI [%s] dynamic code generated event triggered (by GenerateEvents)",
+                 ("[%s] dynamic code generated event triggered (by GenerateEvents)",
                   JvmtiTrace::safe_get_thread_name(thread)));
   if (env->is_enabled(JVMTI_EVENT_DYNAMIC_CODE_GENERATED)) {
     EVT_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
-              ("JVMTI [%s] dynamic code generated event sent for %s",
+              ("[%s] dynamic code generated event sent for %s",
                JvmtiTrace::safe_get_thread_name(thread), name));
     JvmtiEventMark jem(thread);
     JvmtiJavaThreadEventTransition jet(thread);
@@ -2048,13 +2045,13 @@
 void JvmtiExport::post_garbage_collection_finish() {
   Thread *thread = Thread::current(); // this event is posted from VM-Thread.
   EVT_TRIG_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
-                 ("JVMTI [%s] garbage collection finish event triggered",
+                 ("[%s] garbage collection finish event triggered",
                   JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH)) {
       EVT_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
-                ("JVMTI [%s] garbage collection finish event sent ",
+                ("[%s] garbage collection finish event sent",
                  JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiThreadEventTransition jet(thread);
       // JNIEnv is NULL here because this event is posted from VM Thread
@@ -2069,13 +2066,13 @@
 void JvmtiExport::post_garbage_collection_start() {
   Thread* thread = Thread::current(); // this event is posted from vm-thread.
   EVT_TRIG_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_START,
-                 ("JVMTI [%s] garbage collection start event triggered",
+                 ("[%s] garbage collection start event triggered",
                   JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_GARBAGE_COLLECTION_START)) {
       EVT_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_START,
-                ("JVMTI [%s] garbage collection start event sent ",
+                ("[%s] garbage collection start event sent",
                  JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiThreadEventTransition jet(thread);
       // JNIEnv is NULL here because this event is posted from VM Thread
@@ -2090,13 +2087,13 @@
 void JvmtiExport::post_data_dump() {
   Thread *thread = Thread::current();
   EVT_TRIG_TRACE(JVMTI_EVENT_DATA_DUMP_REQUEST,
-                 ("JVMTI [%s] data dump request event triggered",
+                 ("[%s] data dump request event triggered",
                   JvmtiTrace::safe_get_thread_name(thread)));
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_DATA_DUMP_REQUEST)) {
       EVT_TRACE(JVMTI_EVENT_DATA_DUMP_REQUEST,
-                ("JVMTI [%s] data dump request event sent ",
+                ("[%s] data dump request event sent",
                  JvmtiTrace::safe_get_thread_name(thread)));
      JvmtiThreadEventTransition jet(thread);
      // JNIEnv is NULL here because this event is posted from VM Thread
@@ -2123,14 +2120,14 @@
   Handle h(thread, object);
 
   EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER,
-                     ("JVMTI [%s] montior contended enter event triggered",
+                     ("[%s] monitor contended enter event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
       EVT_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER,
-                   ("JVMTI [%s] monitor contended enter event sent",
+                   ("[%s] monitor contended enter event sent",
                     JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiMonitorEventMark  jem(thread, h());
       JvmtiEnv *env = ets->get_env();
@@ -2158,14 +2155,14 @@
   Handle h(thread, object);
 
   EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED,
-                     ("JVMTI [%s] montior contended entered event triggered",
+                     ("[%s] monitor contended entered event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
       EVT_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED,
-                   ("JVMTI [%s] monitor contended enter event sent",
+                   ("[%s] monitor contended enter event sent",
                     JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiMonitorEventMark  jem(thread, h());
       JvmtiEnv *env = ets->get_env();
@@ -2189,14 +2186,14 @@
   Handle h(thread, object);
 
   EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAIT,
-                     ("JVMTI [%s] montior wait event triggered",
+                     ("[%s] monitor wait event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_MONITOR_WAIT)) {
       EVT_TRACE(JVMTI_EVENT_MONITOR_WAIT,
-                   ("JVMTI [%s] monitor wait event sent ",
+                   ("[%s] monitor wait event sent",
                     JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiMonitorEventMark  jem(thread, h());
       JvmtiEnv *env = ets->get_env();
@@ -2225,14 +2222,14 @@
   Handle h(thread, object);
 
   EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAITED,
-                     ("JVMTI [%s] montior waited event triggered",
+                     ("[%s] monitor waited event triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiEnvThreadStateIterator it(state);
   for (JvmtiEnvThreadState* ets = it.first(); ets != NULL; ets = it.next(ets)) {
     if (ets->is_enabled(JVMTI_EVENT_MONITOR_WAITED)) {
       EVT_TRACE(JVMTI_EVENT_MONITOR_WAITED,
-                   ("JVMTI [%s] monitor waited event sent ",
+                   ("[%s] monitor waited event sent",
                     JvmtiTrace::safe_get_thread_name(thread)));
       JvmtiMonitorEventMark  jem(thread, h());
       JvmtiEnv *env = ets->get_env();
@@ -2248,7 +2245,7 @@
 
 
 void JvmtiExport::post_vm_object_alloc(JavaThread *thread,  oop object) {
-  EVT_TRIG_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Trg vm object alloc triggered",
+  EVT_TRIG_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("[%s] Trg vm object alloc triggered",
                       JvmtiTrace::safe_get_thread_name(thread)));
   if (object == NULL) {
     return;
@@ -2258,7 +2255,7 @@
   JvmtiEnvIterator it;
   for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
     if (env->is_enabled(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
-      EVT_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("JVMTI [%s] Evt vmobject alloc sent %s",
+      EVT_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("[%s] Evt vmobject alloc sent %s",
                                          JvmtiTrace::safe_get_thread_name(thread),
                                          object==NULL? "NULL" : object->klass()->external_name()));
 
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -27,6 +27,8 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -321,12 +323,12 @@
   each_method_version_do(&Method::clear_breakpoint);
 }
 
-void JvmtiBreakpoint::print() {
+void JvmtiBreakpoint::print(outputStream* out) {
 #ifndef PRODUCT
+  ResourceMark rm;
   const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
   const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
-
-  tty->print("Breakpoint(%s,%s,%d,%p)",class_name, method_name, _bci, getBcp());
+  out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
 #endif
 }
 
@@ -389,16 +391,17 @@
   _bps.gc_epilogue();
 }
 
-void  JvmtiBreakpoints::print() {
+void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
-  ResourceMark rm;
+  LogTarget(Trace, jvmti) log;
+  LogStreamCHeap log_stream(log);
 
   int n = _bps.length();
   for (int i=0; i<n; i++) {
     JvmtiBreakpoint& bp = _bps.at(i);
-    tty->print("%d: ", i);
-    bp.print();
-    tty->cr();
+    log_stream.print("%d: ", i);
+    bp.print(&log_stream);
+    log_stream.cr();
   }
 #endif
 }
@@ -875,22 +878,21 @@
 void JvmtiSuspendControl::print() {
 #ifndef PRODUCT
   MutexLocker mu(Threads_lock);
-  ResourceMark rm;
-
-  tty->print("Suspended Threads: [");
+  LogStreamHandle(Trace, jvmti) log_stream;
+  log_stream.print("Suspended Threads: [");
   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
 #ifdef JVMTI_TRACE
     const char *name   = JvmtiTrace::safe_get_thread_name(thread);
 #else
     const char *name   = "";
 #endif /*JVMTI_TRACE */
-    tty->print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
+    log_stream.print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
     if (!thread->has_last_Java_frame()) {
-      tty->print("no stack");
+      log_stream.print("no stack");
     }
-    tty->print(") ");
+    log_stream.print(") ");
   }
-  tty->print_cr("]");
+  log_stream.print_cr("]");
 #endif
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -34,6 +34,7 @@
 #include "prims/jvmtiUtil.hpp"
 #include "runtime/stackValueCollection.hpp"
 #include "runtime/vm_operations.hpp"
+#include "utilities/ostream.hpp"
 
 //
 // Forward Declarations
@@ -186,7 +187,7 @@
   void each_method_version_do(method_action meth_act);
   void set();
   void clear();
-  void print();
+  void print(outputStream* out);
 
   Method* method() { return _method; }
 
--- a/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,8 +24,10 @@
 
 #include "precompiled.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiManageCapabilities.hpp"
+
 static const jint CAPA_SIZE = (JVMTI_INTERNAL_CAPABILITY_COUNT + 7) / 8;
 
   // capabilities which are always potentially available
@@ -373,87 +375,87 @@
 #ifndef PRODUCT
 
 void JvmtiManageCapabilities:: print(const jvmtiCapabilities* cap) {
-  tty->print_cr("----- capabilities -----");
+  log_trace(jvmti)("----- capabilities -----");
   if (cap->can_tag_objects)
-    tty->print_cr("can_tag_objects");
+    log_trace(jvmti)("can_tag_objects");
   if (cap->can_generate_field_modification_events)
-    tty->print_cr("can_generate_field_modification_events");
+    log_trace(jvmti)("can_generate_field_modification_events");
   if (cap->can_generate_field_access_events)
-    tty->print_cr("can_generate_field_access_events");
+    log_trace(jvmti)("can_generate_field_access_events");
   if (cap->can_get_bytecodes)
-    tty->print_cr("can_get_bytecodes");
+    log_trace(jvmti)("can_get_bytecodes");
   if (cap->can_get_synthetic_attribute)
-    tty->print_cr("can_get_synthetic_attribute");
+    log_trace(jvmti)("can_get_synthetic_attribute");
   if (cap->can_get_owned_monitor_info)
-    tty->print_cr("can_get_owned_monitor_info");
+    log_trace(jvmti)("can_get_owned_monitor_info");
   if (cap->can_get_current_contended_monitor)
-    tty->print_cr("can_get_current_contended_monitor");
+    log_trace(jvmti)("can_get_current_contended_monitor");
   if (cap->can_get_monitor_info)
-    tty->print_cr("can_get_monitor_info");
+    log_trace(jvmti)("can_get_monitor_info");
   if (cap->can_get_constant_pool)
-    tty->print_cr("can_get_constant_pool");
+    log_trace(jvmti)("can_get_constant_pool");
   if (cap->can_pop_frame)
-    tty->print_cr("can_pop_frame");
+    log_trace(jvmti)("can_pop_frame");
   if (cap->can_force_early_return)
-    tty->print_cr("can_force_early_return");
+    log_trace(jvmti)("can_force_early_return");
   if (cap->can_redefine_classes)
-    tty->print_cr("can_redefine_classes");
+    log_trace(jvmti)("can_redefine_classes");
   if (cap->can_retransform_classes)
-    tty->print_cr("can_retransform_classes");
+    log_trace(jvmti)("can_retransform_classes");
   if (cap->can_signal_thread)
-    tty->print_cr("can_signal_thread");
+    log_trace(jvmti)("can_signal_thread");
   if (cap->can_get_source_file_name)
-    tty->print_cr("can_get_source_file_name");
+    log_trace(jvmti)("can_get_source_file_name");
   if (cap->can_get_line_numbers)
-    tty->print_cr("can_get_line_numbers");
+    log_trace(jvmti)("can_get_line_numbers");
   if (cap->can_get_source_debug_extension)
-    tty->print_cr("can_get_source_debug_extension");
+    log_trace(jvmti)("can_get_source_debug_extension");
   if (cap->can_access_local_variables)
-    tty->print_cr("can_access_local_variables");
+    log_trace(jvmti)("can_access_local_variables");
   if (cap->can_maintain_original_method_order)
-    tty->print_cr("can_maintain_original_method_order");
+    log_trace(jvmti)("can_maintain_original_method_order");
   if (cap->can_generate_single_step_events)
-    tty->print_cr("can_generate_single_step_events");
+    log_trace(jvmti)("can_generate_single_step_events");
   if (cap->can_generate_exception_events)
-    tty->print_cr("can_generate_exception_events");
+    log_trace(jvmti)("can_generate_exception_events");
   if (cap->can_generate_frame_pop_events)
-    tty->print_cr("can_generate_frame_pop_events");
+    log_trace(jvmti)("can_generate_frame_pop_events");
   if (cap->can_generate_breakpoint_events)
-    tty->print_cr("can_generate_breakpoint_events");
+    log_trace(jvmti)("can_generate_breakpoint_events");
   if (cap->can_suspend)
-    tty->print_cr("can_suspend");
+    log_trace(jvmti)("can_suspend");
   if (cap->can_redefine_any_class )
-    tty->print_cr("can_redefine_any_class");
+    log_trace(jvmti)("can_redefine_any_class");
   if (cap->can_retransform_any_class )
-    tty->print_cr("can_retransform_any_class");
+    log_trace(jvmti)("can_retransform_any_class");
   if (cap->can_get_current_thread_cpu_time)
-    tty->print_cr("can_get_current_thread_cpu_time");
+    log_trace(jvmti)("can_get_current_thread_cpu_time");
   if (cap->can_get_thread_cpu_time)
-    tty->print_cr("can_get_thread_cpu_time");
+    log_trace(jvmti)("can_get_thread_cpu_time");
   if (cap->can_generate_method_entry_events)
-    tty->print_cr("can_generate_method_entry_events");
+    log_trace(jvmti)("can_generate_method_entry_events");
   if (cap->can_generate_method_exit_events)
-    tty->print_cr("can_generate_method_exit_events");
+    log_trace(jvmti)("can_generate_method_exit_events");
   if (cap->can_generate_all_class_hook_events)
-    tty->print_cr("can_generate_all_class_hook_events");
+    log_trace(jvmti)("can_generate_all_class_hook_events");
   if (cap->can_generate_compiled_method_load_events)
-    tty->print_cr("can_generate_compiled_method_load_events");
+    log_trace(jvmti)("can_generate_compiled_method_load_events");
   if (cap->can_generate_monitor_events)
-    tty->print_cr("can_generate_monitor_events");
+    log_trace(jvmti)("can_generate_monitor_events");
   if (cap->can_generate_vm_object_alloc_events)
-    tty->print_cr("can_generate_vm_object_alloc_events");
+    log_trace(jvmti)("can_generate_vm_object_alloc_events");
   if (cap->can_generate_native_method_bind_events)
-    tty->print_cr("can_generate_native_method_bind_events");
+    log_trace(jvmti)("can_generate_native_method_bind_events");
   if (cap->can_generate_garbage_collection_events)
-    tty->print_cr("can_generate_garbage_collection_events");
+    log_trace(jvmti)("can_generate_garbage_collection_events");
   if (cap->can_generate_object_free_events)
-    tty->print_cr("can_generate_object_free_events");
+    log_trace(jvmti)("can_generate_object_free_events");
   if (cap->can_generate_resource_exhaustion_heap_events)
-    tty->print_cr("can_generate_resource_exhaustion_heap_events");
+    log_trace(jvmti)("can_generate_resource_exhaustion_heap_events");
   if (cap->can_generate_resource_exhaustion_threads_events)
-    tty->print_cr("can_generate_resource_exhaustion_threads_events");
+    log_trace(jvmti)("can_generate_resource_exhaustion_threads_events");
   if (cap->can_generate_early_vmstart)
-    tty->print_cr("can_generate_early_vmstart");
+    log_trace(jvmti)("can_generate_early_vmstart");
 }
 
 #endif
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -146,11 +146,7 @@
     _size_index = size_index;
     _size = initial_size;
     _entry_count = 0;
-    if (TraceJVMTIObjectTagging) {
-      _trace_threshold = initial_trace_threshold;
-    } else {
-      _trace_threshold = -1;
-    }
+    _trace_threshold = initial_trace_threshold;
     _load_factor = load_factor;
     _resize_threshold = (int)(_load_factor * _size);
     _resizing_enabled = true;
@@ -329,8 +325,7 @@
     }
 
     _entry_count++;
-    if (trace_threshold() > 0 && entry_count() >= trace_threshold()) {
-      assert(TraceJVMTIObjectTagging, "should only get here when tracing");
+    if (log_is_enabled(Debug, jvmti, objecttagging) && entry_count() >= trace_threshold()) {
       print_memory_usage();
       compute_next_trace_threshold();
     }
@@ -409,6 +404,7 @@
 
 // compute threshold for the next trace message
 void JvmtiTagHashmap::compute_next_trace_threshold() {
+  _trace_threshold = entry_count();
   if (trace_threshold() < medium_trace_threshold) {
     _trace_threshold += small_trace_threshold;
   } else {
@@ -3413,12 +3409,6 @@
     delayed_add = next;
   }
 
-  // stats
-  if (TraceJVMTIObjectTagging) {
-    int post_total = hashmap->_entry_count;
-    int pre_total = post_total + freed;
-
-    tty->print_cr("(%d->%d, %d freed, %d total moves)",
-        pre_total, post_total, freed, moved);
-  }
+  log_debug(jvmti, objecttagging)("(%d->%d, %d freed, %d total moves)",
+                                  hashmap->_entry_count + freed, hashmap->_entry_count, freed, moved);
 }
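
The statistics line above now goes through Unified Logging under the jvmti+objecttagging tags at debug level; the TraceJVMTIObjectTagging alias added to the table in arguments.cpp further down maps the legacy flag onto the same tag set. A minimal, hypothetical launcher sketch of how that output could be selected follows; it reuses the ProcessTools/OutputAnalyzer helpers that the new tests in this change already use, and the class name and the bare -version run are illustrative assumptions only, not part of the change.

import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.ProcessTools;

// Hypothetical sketch: select the object-tagging log output via UL.
// -Xlog:jvmti+objecttagging=debug enables the same output the removed
// TraceJVMTIObjectTagging code used to print to tty.
public class ObjectTaggingLogSketch {
    public static void main(String[] args) throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-Xlog:jvmti+objecttagging=debug",
            "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldHaveExitValue(0);
        // The "(%d->%d, %d freed, %d total moves)" line only appears once a
        // JVMTI agent actually tags objects; this bare run merely checks that
        // the log selection itself is accepted.
        System.out.println(output.getOutput());
    }
}
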
--- a/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -224,18 +224,11 @@
   RegisterMap reg_map(get_thread());
   javaVFrame *jvf = get_thread()->last_java_vframe(&reg_map);
   int n = 0;
-  // tty->print_cr("CSD: counting frames on %s ...",
-  //               JvmtiTrace::safe_get_thread_name(get_thread()));
   while (jvf != NULL) {
     Method* method = jvf->method();
-    // tty->print_cr("CSD: frame - method %s.%s - loc %d",
-    //               method->klass_name()->as_C_string(),
-    //               method->name()->as_C_string(),
-    //               jvf->bci() );
     jvf = jvf->java_sender();
     n++;
   }
-  // tty->print_cr("CSD: frame count: %d", n);
   return n;
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiTrace.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiTrace.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/log.hpp"
+#include "logging/logConfiguration.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiTrace.hpp"
 
@@ -80,6 +82,17 @@
   } else {
     curr = "";  // hack in fixed tracing here
   }
+
+  // Enable UL for JVMTI tracing
+  if (strlen(curr) > 0) {
+    if (!log_is_enabled(Trace, jvmti)) {
+      log_warning(arguments)("-XX:+TraceJVMTI specified, "
+         "but no log output configured for the 'jvmti' tag on Trace level. "
+         "Defaulting to -Xlog:jvmti=trace");
+      LogConfiguration::configure_stdout(LogLevel::Trace, true, LOG_TAGS(jvmti));
+    }
+  }
+
   very_end = curr + strlen(curr);
   while (curr < very_end) {
     const char *curr_end = strchr(curr, ',');
@@ -127,7 +140,7 @@
         bits |= SHOW_EVENT_SENT;
         break;
       default:
-        tty->print_cr("Invalid trace flag '%c'", *flags);
+        log_warning(jvmti)("Invalid trace flag '%c'", *flags);
         break;
       }
     }
@@ -152,7 +165,7 @@
       domain = ALL_EVENT | EVENT;
     } else if (len==2 && strncmp(curr, "ec", 2)==0) {
       _trace_event_controller = true;
-      tty->print_cr("JVMTI Tracing the event controller");
+      log_trace(jvmti)("Tracing the event controller");
     } else {
       domain = FUNC | EVENT;  // go searching
     }
@@ -161,9 +174,9 @@
     if (domain & FUNC) {
       if (domain & ALL_FUNC) {
         if (domain & EXCLUDE) {
-          tty->print("JVMTI Tracing all significant functions");
+          log_trace(jvmti)("Tracing all significant functions");
         } else {
-          tty->print_cr("JVMTI Tracing all functions");
+          log_trace(jvmti)("Tracing all functions");
         }
       }
       for (int i = 0; i <= _max_function_index; ++i) {
@@ -178,7 +191,7 @@
             if (fname != NULL) {
               size_t fnlen = strlen(fname);
               if (len==fnlen && strncmp(curr, fname, fnlen)==0) {
-                tty->print_cr("JVMTI Tracing the function: %s", fname);
+                log_trace(jvmti)("Tracing the function: %s", fname);
                 do_op = true;
               }
             }
@@ -196,7 +209,7 @@
     }
     if (domain & EVENT) {
       if (domain & ALL_EVENT) {
-        tty->print_cr("JVMTI Tracing all events");
+        log_trace(jvmti)("Tracing all events");
       }
       for (int i = 0; i <= _max_event_index; ++i) {
         bool do_op = false;
@@ -207,7 +220,7 @@
           if (ename != NULL) {
             size_t evtlen = strlen(ename);
             if (len==evtlen && strncmp(curr, ename, evtlen)==0) {
-              tty->print_cr("JVMTI Tracing the event: %s", ename);
+              log_trace(jvmti)("Tracing the event: %s", ename);
               do_op = true;
             }
           }
@@ -223,7 +236,7 @@
       }
     }
     if (!_on && (domain & (FUNC|EVENT))) {
-      tty->print_cr("JVMTI Trace domain not found");
+      log_warning(jvmti)("Trace domain not found");
     }
     curr = curr_end + 1;
   }
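
The jvmtiTrace.cpp hunk above makes a non-empty TraceJVMTI specification fall back to -Xlog:jvmti=trace, with a warning, whenever no output is configured for the jvmti tag at trace level. A hypothetical sketch of observing that fallback is shown here, again relying on the ProcessTools/OutputAnalyzer helpers from /testlibrary; the class name, the "ec" trace spec, and the assumption that JVMTI tracing (JVMTI_TRACE) is compiled into the target build are all illustrative, not part of this change.

import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.ProcessTools;

// Hypothetical sketch: exercise the TraceJVMTI -> UL fallback.
// "ec" is just an example trace spec; no -Xlog:jvmti output is configured,
// so the fallback path added above should be taken on builds where JVMTI
// tracing is available.
public class TraceJvmtiFallbackSketch {
    public static void main(String[] args) throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:TraceJVMTI=ec",
            "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        // Look for the warning text from the hunk above:
        //   "Defaulting to -Xlog:jvmti=trace"
        System.out.println(output.getOutput());
    }
}
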
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -420,6 +420,7 @@
   { "TraceLoaderConstraints",    LogLevel::Info,  true,  LOG_TAGS(classload, constraints) },
   { "TraceMonitorInflation",     LogLevel::Debug, true,  LOG_TAGS(monitorinflation) },
   { "TraceSafepointCleanupTime", LogLevel::Info,  true,  LOG_TAGS(safepointcleanup) },
+  { "TraceJVMTIObjectTagging",   LogLevel::Debug, true,  LOG_TAGS(jvmti, objecttagging) },
   { NULL,                        LogLevel::Off,   false, LOG_TAGS(_NO_TAG) }
 };
 
@@ -998,7 +999,7 @@
   int max_tags = sizeof(tagSet)/sizeof(tagSet[0]);
   for (int i = 0; i < max_tags && tagSet[i] != LogTag::__NO_TAG; i++) {
     if (i > 0) {
-      strncat(tagset_buffer, ",", max_tagset_len - strlen(tagset_buffer));
+      strncat(tagset_buffer, "+", max_tagset_len - strlen(tagset_buffer));
     }
     strncat(tagset_buffer, LogTag::name(tagSet[i]), max_tagset_len - strlen(tagset_buffer));
   }
@@ -2095,8 +2096,8 @@
   }
 
 #if INCLUDE_ALL_GCS
-  if (G1ConcRefinementThreads == 0) {
-    FLAG_SET_DEFAULT(G1ConcRefinementThreads, ParallelGCThreads);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
+    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
   }
 #endif
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -662,11 +662,11 @@
   /* CompactHashTable */                                                                                                             \
   /********************/                                                                                                             \
                                                                                                                                      \
-  nonstatic_field(SymbolCompactHashTable,      _base_address,                                 uintx)                                 \
-  nonstatic_field(SymbolCompactHashTable,      _entry_count,                                  juint)                                 \
-  nonstatic_field(SymbolCompactHashTable,      _bucket_count,                                 juint)                                 \
-  nonstatic_field(SymbolCompactHashTable,      _table_end_offset,                             juint)                                 \
-  nonstatic_field(SymbolCompactHashTable,      _buckets,                                      juint*)                                \
+  nonstatic_field(SymbolCompactHashTable,      _base_address,                                 address)                               \
+  nonstatic_field(SymbolCompactHashTable,      _entry_count,                                  u4)                                    \
+  nonstatic_field(SymbolCompactHashTable,      _bucket_count,                                 u4)                                    \
+  nonstatic_field(SymbolCompactHashTable,      _buckets,                                      u4*)                                   \
+  nonstatic_field(SymbolCompactHashTable,      _entries,                                      u4*)                                   \
                                                                                                                                      \
   /********************/                                                                                                             \
   /* SystemDictionary */                                                                                                             \
--- a/hotspot/src/share/vm/services/g1MemoryPool.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/services/g1MemoryPool.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -24,8 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "services/g1MemoryPool.hpp"
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Apr 20 11:11:56 2016 +0000
@@ -199,9 +199,6 @@
 const size_t G                  = M*K;
 const size_t HWperKB            = K / sizeof(HeapWord);
 
-const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
-const jint max_jint = (juint)min_jint - 1;                     // 0x7FFFFFFF == largest jint
-
 // Constants for converting from a base unit to milli-base units.  For
 // example from seconds to milliseconds and microseconds
 
@@ -381,6 +378,14 @@
 typedef jint   s4;
 typedef jlong  s8;
 
+const jbyte min_jbyte = -(1 << 7);       // smallest jbyte
+const jbyte max_jbyte = (1 << 7) - 1;    // largest jbyte
+const jshort min_jshort = -(1 << 15);    // smallest jshort
+const jshort max_jshort = (1 << 15) - 1; // largest jshort
+
+const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
+const jint max_jint = (juint)min_jint - 1;                     // 0x7FFFFFFF == largest jint
+
 //----------------------------------------------------------------------------------------------------
 // JVM spec restrictions
 
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Wed Apr 20 11:11:56 2016 +0000
@@ -28,6 +28,7 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/disassembler.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "logging/logConfiguration.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.inline.hpp"
@@ -774,6 +775,13 @@
        st->cr();
      }
 
+  STEP(395, "(printing log configuration)")
+    if (_verbose) {
+      st->print_cr("Logging:");
+      LogConfiguration::describe_current_configuration(st);
+      st->cr();
+    }
+
   STEP(400, "(printing all environment variables)" )
 
      if (_verbose) {
@@ -937,6 +945,11 @@
     st->cr();
   }
 
+  // STEP("(printing log configuration)")
+  st->print_cr("Logging:");
+  LogConfiguration::describe(st);
+  st->cr();
+
   // STEP("(printing all environment variables)")
 
   os::print_environment_variables(st, env_list);
--- a/hotspot/test/gc/arguments/TestG1ConcRefinementThreads.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/test/gc/arguments/TestG1ConcRefinementThreads.java	Wed Apr 20 11:11:56 2016 +0000
@@ -38,7 +38,7 @@
 
 public class TestG1ConcRefinementThreads {
 
-  static final int AUTO_SELECT_THREADS_COUNT = 0;
+  static final int AUTO_SELECT_THREADS_COUNT = -1;
   static final int PASSED_THREADS_COUNT = 11;
 
   public static void main(String args[]) throws Exception {
@@ -49,8 +49,8 @@
 
     // zero setting case
     runG1ConcRefinementThreadsTest(
-        new String[]{"-XX:G1ConcRefinementThreads=0"}, // automatically selected
-        AUTO_SELECT_THREADS_COUNT /* set to zero */);
+        new String[]{"-XX:G1ConcRefinementThreads=0"},
+        0);
 
     // non-zero setting case
     runG1ConcRefinementThreadsTest(
@@ -77,7 +77,7 @@
   private static void checkG1ConcRefinementThreadsConsistency(String output, int expectedValue) {
     int actualValue = getIntValue("G1ConcRefinementThreads", output);
 
-    if (expectedValue == 0) {
+    if (expectedValue == AUTO_SELECT_THREADS_COUNT) {
       // If expectedValue is automatically selected, set it same as ParallelGCThreads.
       expectedValue = getIntValue("ParallelGCThreads", output);
     }
--- a/hotspot/test/gc/g1/TestRegionLivenessPrint.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/test/gc/g1/TestRegionLivenessPrint.java	Wed Apr 20 11:11:56 2016 +0000
@@ -32,7 +32,7 @@
  * @build TestRegionLivenessPrint
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+WhiteBoxAPI -XX:+UseG1GC -Xmx128M -XX:G1HeapRegionSize=1m -Xlog:gc+liveness=trace TestRegionLivenessPrint
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC -Xmx128M -XX:G1HeapRegionSize=1m -Xlog:gc+liveness=trace TestRegionLivenessPrint
  */
 
 import sun.hotspot.WhiteBox;
--- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Wed Apr 20 11:11:56 2016 +0000
@@ -125,7 +125,7 @@
 
         // test with sizes which just meet the minimum required sizes
         // the following tests also attempt to use the shared archive
-        new SharedSizeTestData(Region.RO, Platform.is64bit() ? "9M":"8M", Result.VALID_ARCHIVE),
+        new SharedSizeTestData(Region.RO, Platform.is64bit() ? "10M":"9M", Result.VALID_ARCHIVE),
         new SharedSizeTestData(Region.RW, Platform.is64bit() ? "12M":"7M", Result.VALID_ARCHIVE),
         new SharedSizeTestData(Region.MD, Platform.is64bit() ? "4M":"2M", Result.VALID_ARCHIVE),
         new SharedSizeTestData(Region.MC, "120k", Result.VALID_ARCHIVE),
@@ -176,7 +176,7 @@
                                output.getOutput().contains("Unable to reserve shared space at required address")) &&
                                output.getExitValue() == 1) {
                                System.out.println("Unable to use shared archive: test not executed; assumed passed");
-                               return;
+                               continue;
                           }
                       }
                       output.shouldHaveExitValue(0);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTest.java	Wed Apr 20 11:11:56 2016 +0000
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SASymbolTableTest
+ * @summary Walk symbol table using SA, with and without CDS.
+ * @library /testlibrary
+ * @modules java.base/jdk.internal.misc
+ *          jdk.hotspot.agent/sun.jvm.hotspot.oops
+ *          jdk.hotspot.agent/sun.jvm.hotspot.memory
+ *          jdk.hotspot.agent/sun.jvm.hotspot.runtime
+ *          jdk.hotspot.agent/sun.jvm.hotspot.tools
+ *          java.management
+ * @build SASymbolTableTestAgent SASymbolTableTestAttachee jdk.test.lib.*
+ * @run main SASymbolTableTest
+ */
+
+import jdk.test.lib.*;
+
+/*
+ * The purpose of this test is to validate that we can use SA to
+ * attach a process and walk its SymbolTable, regardless of whether
+ * the attachee process runs in CDS mode or not.
+ *
+ * SASymbolTableTest just sets up the agent and attachee processes.
+ * The SymbolTable walking is done in the SASymbolTableTestAgent class.
+ */
+public class SASymbolTableTest {
+    static String jsaName = "./SASymbolTableTest.jsa";
+
+    public static void main(String[] args) throws Exception {
+        if (!Platform.shouldSAAttach()) {
+            System.out.println("SA attach not expected to work - test skipped.");
+            return;
+        }
+        createArchive();
+        run(true);
+        run(false);
+    }
+
+    private static void createArchive()  throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=" + jsaName,
+            "-Xshare:dump");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Loading classes to share");
+        output.shouldHaveExitValue(0);
+    }
+
+    private static void run(boolean useArchive) throws Exception {
+        String flag = useArchive ? "auto" : "off";
+
+        // (1) Launch the attachee process
+        ProcessBuilder attachee = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=" + jsaName,
+            "-Xshare:" + flag,
+            "-showversion",                // so we can see "sharing" in the output
+            "SASymbolTableTestAttachee");
+
+        final Process p = attachee.start();
+
+        // (2) Launch the agent process
+        long pid = p.getPid();
+        System.out.println("Attaching agent " + pid);
+        ProcessBuilder tool = ProcessTools.createJavaProcessBuilder(
+            "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED",
+            "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.memory=ALL-UNNAMED",
+            "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.runtime=ALL-UNNAMED",
+            "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.tools=ALL-UNNAMED",
+            "SASymbolTableTestAgent",
+            Long.toString(pid));
+        OutputAnalyzer output = ProcessTools.executeProcess(tool);
+        System.out.println(output.getOutput());
+        output.shouldHaveExitValue(0);
+
+        Thread t = new Thread() {
+                public void run() {
+                    try {
+                        OutputAnalyzer output = new OutputAnalyzer(p);
+                        System.out.println("STDOUT[");
+                        System.out.print(output.getStdout());
+                        System.out.println("]");
+                        System.out.println("STDERR[");
+                        System.out.print(output.getStderr());
+                        System.out.println("]");
+                    } catch (Throwable t) {
+                        t.printStackTrace();
+                    }
+                }
+            };
+        t.start();
+
+        Thread.sleep(2 * 1000);
+        p.destroy();
+        t.join();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTestAgent.java	Wed Apr 20 11:11:56 2016 +0000
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.jvm.hotspot.memory.SymbolTable;
+import sun.jvm.hotspot.oops.Symbol;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.tools.Tool;
+
+/**
+ * This class is launched in a sub-process by the main test,
+ * SASymbolTableTest.java.
+ *
+ * It uses SA to connect to another JVM process, whose PID is specified in args[].
+ * The purpose of the test is to validate that we can walk the SymbolTable
+ * and CompactHashTable of the other process. Everything should work regardless
+ * of whether the other process runs in CDS mode or not.
+ *
+ * Note: CompactHashTable is used only when CDS is enabled.
+ */
+public class SASymbolTableTestAgent extends Tool {
+    public SASymbolTableTestAgent() {
+        super();
+    }
+    public static void main(String args[]) {
+        SASymbolTableTestAgent tool = new SASymbolTableTestAgent();
+        tool.execute(args);
+    }
+
+    static String[] commonNames = {
+        "java/lang/Object",
+        "java/lang/String",
+        "java/lang/Class",
+        "java/lang/Cloneable",
+        "java/lang/ClassLoader",
+        "java/io/Serializable",
+        "java/lang/System",
+        "java/lang/Throwable",
+        "java/lang/Error",
+        "java/lang/ThreadDeath",
+        "java/lang/Exception",
+        "java/lang/RuntimeException",
+        "java/lang/SecurityManager",
+        "java/security/ProtectionDomain",
+        "java/security/AccessControlContext",
+        "java/security/SecureClassLoader",
+        "java/lang/ClassNotFoundException",
+        "java/lang/NoClassDefFoundError",
+        "java/lang/LinkageError",
+        "java/lang/ClassCastException",
+        "java/lang/ArrayStoreException",
+        "java/lang/VirtualMachineError",
+        "java/lang/OutOfMemoryError",
+        "java/lang/StackOverflowError",
+        "java/lang/IllegalMonitorStateException",
+        "java/lang/ref/Reference",
+        "java/lang/ref/SoftReference",
+        "java/lang/ref/WeakReference",
+        "java/lang/ref/FinalReference",
+        "java/lang/ref/PhantomReference",
+        "java/lang/ref/Finalizer",
+        "java/lang/Thread",
+        "java/lang/ThreadGroup",
+        "java/util/Properties",
+        "java/lang/reflect/AccessibleObject",
+        "java/lang/reflect/Field",
+        "java/lang/reflect/Method",
+        "java/lang/reflect/Constructor",
+        "java/lang/invoke/MethodHandle",
+        "java/lang/invoke/MemberName",
+        "java/lang/invoke/MethodHandleNatives",
+        "java/lang/invoke/MethodType",
+        "java/lang/BootstrapMethodError",
+        "java/lang/invoke/CallSite",
+        "java/lang/invoke/ConstantCallSite",
+        "java/lang/invoke/MutableCallSite",
+        "java/lang/invoke/VolatileCallSite",
+        "java/lang/StringBuffer",
+        "java/lang/StringBuilder",
+        "java/io/ByteArrayInputStream",
+        "java/io/File",
+        "java/net/URLClassLoader",
+        "java/net/URL",
+        "java/util/jar/Manifest",
+        "java/security/CodeSource",
+    };
+
+    static String[] badNames = {
+        "java/lang/badbadbad",
+        "java/io/badbadbadbad",
+        "this*symbol*must*not*exist"
+    };
+
+    public void run() {
+        System.out.println("SASymbolTableTestAgent: starting");
+        VM vm = VM.getVM();
+        SymbolTable table = vm.getSymbolTable();
+
+        // (a) These are names that are likely to exist in the symbol table
+        //     of a JVM after start-up. They were taken from vmSymbols.hpp
+        //     during the middle of JDK9 development.
+        //
+        //     The purpose is not to check that each name must exist (a future
+        //     version of JDK may not preload some of the classes).
+        //
+        //     The purpose of this loop is to ensure that we check a lot of symbols,
+        //     so we will (most likely) hit both the VALUE_ONLY_BUCKET_TYPE and the normal bucket type
+        //     in CompactHashTable.probe().
+        for (String n : commonNames) {
+            Symbol s = table.probe(n);
+            System.out.format("%-40s = %s\n", n, s);
+        }
+
+        System.out.println("======================================================================");
+
+        // (b) Also test a few strings that are known not to exist in the table. This will cause
+        //     both the compact table (if it exists) and the regular table to be walked.
+        for (String n : badNames) {
+            Symbol s = table.probe(n);
+            System.out.format("%-40s = %s\n", n, s);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTestAttachee.java	Wed Apr 20 11:11:56 2016 +0000
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * This class is launched in a sub-process by the main test,
+ * SASymbolTableTest.java.
+ *
+ * This class does nothing in particular. It just sleeps for 120
+ * seconds so SASymbolTableTestAgent can have a chance to examine its
+ * SymbolTable. This process should be killed by the parent process
+ * after SASymbolTableTestAgent has completed testing.
+ */
+public class SASymbolTableTestAttachee {
+    public static void main(String args[]) throws Throwable {
+        System.out.println("SASymbolTableTestAttachee: sleeping to wait for SA tool to attach ...");
+        Thread.sleep(120 * 1000);
+    }
+}
--- a/hotspot/test/runtime/modules/AccessCheck/ExportAllUnnamed.java	Wed Apr 20 11:05:28 2016 +0000
+++ b/hotspot/test/runtime/modules/AccessCheck/ExportAllUnnamed.java	Wed Apr 20 11:11:56 2016 +0000
@@ -28,11 +28,12 @@
  * @summary Test if package p2 in module m2 is exported to all unnamed,
  *          then class p1.c1 in an unnamed module can read p2.c2 in module m2.
  * @library /testlibrary /test/lib
+ * @modules java.base/jdk.internal.module
  * @compile myloaders/MySameClassLoader.java
  * @compile p2/c2.java
  * @compile p1/c1.java
- * @compile -XaddExports:java.base/jdk.internal.module=ALL-UNNAMED ExportAllUnnamed.java
- * @run main/othervm -XaddExports:java.base/jdk.internal.module=ALL-UNNAMED -Xbootclasspath/a:. ExportAllUnnamed
+ * @build ExportAllUnnamed
+ * @run main/othervm -Xbootclasspath/a:. ExportAllUnnamed
  */
 
 import static jdk.test.lib.Asserts.*;