Merge
author duke
Wed, 05 Jul 2017 17:53:04 +0200
changeset 10713 f4cc3ff76295
parent 10712 0c1ab928e08c (current diff)
parent 10686 7eba342b9ddf (diff)
child 10715 dfdd0a7cd9b5
Merge
--- a/.hgtags-top-repo	Mon Oct 17 19:06:53 2011 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 17:53:04 2017 +0200
@@ -130,3 +130,4 @@
 28cf2aec4dd7c3c75efc1c15078522467c781a6d jdk8-b06
 0db7ae9f2b1017124c779bccd016c976928859a0 jdk8-b07
 fb1bc13260d76447e269e843859eb593fe2a8ab2 jdk8-b08
+8adb70647b5af5273dfe6a540f07be667cd50216 jdk8-b09
--- a/hotspot/.hgtags	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 17:53:04 2017 +0200
@@ -181,7 +181,7 @@
 dce7d24674f4d0bed00de24f00119057fdce7cfb jdk8-b05
 0db80d8e77fccddf5e6fa49963226b54ac7d0f62 jdk8-b06
 3f0cf875af83f55ec5e1a5cea80455315f9322a2 jdk8-b07
-3a2fb61165dfc72e6de2adab1f2649a408f5e577 hs22-b01
+0cc8a70952c368e06de2adab1f2649a408f5e577 hs22-b01
 7c29742c41b44fb0cd5a13c7ac8834f3f2ca649e hs22-b02
 3a2fb61165dfc72e398179a2796d740c8da5b8c0 hs22-b03
 ce9bde819dcba4a5d2822229d9183e69c74326ca hs22-b04
@@ -189,3 +189,5 @@
 650d15d8f37255d3b805aa00c5bd1c30984b203d hs22-b06
 da883b9e6d3788057f9577e72712998ed82c9b7e hs23-b01
 49ed7eacfd16616166ff066493143889741097af jdk8-b08
+7c20d272643f47195478708eff593a9cce40fec4 jdk8-b09
+e4f412d2b75d2c797acff965aa2c420e3d358f09 hs23-b02
--- a/hotspot/agent/make/Makefile	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/agent/make/Makefile	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,7 @@
 sun.jvm.hotspot.debugger.windbg.x86 \
 sun.jvm.hotspot.debugger.x86 \
 sun.jvm.hotspot.gc_implementation \
+sun.jvm.hotspot.gc_implementation.g1 \
 sun.jvm.hotspot.gc_implementation.parallelScavenge \
 sun.jvm.hotspot.gc_implementation.shared \
 sun.jvm.hotspot.gc_interface \
@@ -176,6 +177,9 @@
 sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 sun/jvm/hotspot/debugger/windbg/x86/*.java \
 sun/jvm/hotspot/debugger/x86/*.java \
+sun/jvm/hotspot/gc_implementation/g1/*.java \
+sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
+sun/jvm/hotspot/gc_implementation/shared/*.java \
 sun/jvm/hotspot/interpreter/*.java \
 sun/jvm/hotspot/jdi/*.java \
 sun/jvm/hotspot/livejvm/*.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Wed Jul 05 17:53:04 2017 +0200
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.gc_interface.CollectedHeapName;
+import sun.jvm.hotspot.memory.MemRegion;
+import sun.jvm.hotspot.memory.SharedHeap;
+import sun.jvm.hotspot.memory.SpaceClosure;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1CollectedHeap.
+
+public class G1CollectedHeap extends SharedHeap {
+    // HeapRegionSeq _seq;
+    static private long hrsFieldOffset;
+    // MemRegion _g1_committed;
+    static private long g1CommittedFieldOffset;
+    // size_t _summary_bytes_used;
+    static private CIntegerField summaryBytesUsedField;
+    // G1MonitoringSupport* _g1mm
+    static private AddressField g1mmField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1CollectedHeap");
+
+        hrsFieldOffset = type.getField("_hrs").getOffset();
+        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
+        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1mmField = type.getAddressField("_g1mm");
+    }
+
+    public long capacity() {
+        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
+        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
+        return g1_committed.byteSize();
+    }
+
+    public long used() {
+        return summaryBytesUsedField.getValue(addr);
+    }
+
+    public long n_regions() {
+        return hrs().length();
+    }
+
+    private HeapRegionSeq hrs() {
+        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
+        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
+                                                         hrsAddr);
+    }
+
+    public G1MonitoringSupport g1mm() {
+        Address g1mmAddr = g1mmField.getValue(addr);
+        return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
+    }
+
+    private Iterator<HeapRegion> heapRegionIterator() {
+        return hrs().heapRegionIterator();
+    }
+
+    public void heapRegionIterate(SpaceClosure scl) {
+        Iterator<HeapRegion> iter = heapRegionIterator();
+        while (iter.hasNext()) {
+            HeapRegion hr = iter.next();
+            scl.doSpace(hr);
+        }
+    }
+
+    public CollectedHeapName kind() {
+        return CollectedHeapName.G1_COLLECTED_HEAP;
+    }
+
+    public G1CollectedHeap(Address addr) {
+        super(addr);
+    }
+}
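
A minimal usage sketch for the new mirror class (illustrative, not part of the
changeset): once an SA agent is attached, VM.getVM() exposes the heap, and a
target running -XX:+UseG1GC yields a G1CollectedHeap wrapper via the Universe
mapping added further down. The probe class name and pid handling below are
assumptions.

    import sun.jvm.hotspot.HotSpotAgent;
    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.runtime.VM;

    public class G1HeapProbe {
        public static void main(String[] args) {
            HotSpotAgent agent = new HotSpotAgent();
            agent.attach(Integer.parseInt(args[0]));  // pid of a -XX:+UseG1GC JVM
            try {
                G1CollectedHeap g1h =
                    (G1CollectedHeap) VM.getVM().getUniverse().heap();
                System.out.println("capacity  = " + g1h.capacity() + " bytes");
                System.out.println("used      = " + g1h.used() + " bytes");
                System.out.println("n_regions = " + g1h.n_regions());
            } finally {
                agent.detach();
            }
        }
    }
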
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Wed Jul 05 17:53:04 2017 +0200
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1MonitoringSupport.
+
+public class G1MonitoringSupport extends VMObject {
+    // size_t _eden_committed;
+    static private CIntegerField edenCommittedField;
+    // size_t _eden_used;
+    static private CIntegerField edenUsedField;
+    // size_t _survivor_committed;
+    static private CIntegerField survivorCommittedField;
+    // size_t _survivor_used;
+    static private CIntegerField survivorUsedField;
+    // size_t _old_committed;
+    static private CIntegerField oldCommittedField;
+    // size_t _old_used;
+    static private CIntegerField oldUsedField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1MonitoringSupport");
+
+        edenCommittedField = type.getCIntegerField("_eden_committed");
+        edenUsedField = type.getCIntegerField("_eden_used");
+        survivorCommittedField = type.getCIntegerField("_survivor_committed");
+        survivorUsedField = type.getCIntegerField("_survivor_used");
+        oldCommittedField = type.getCIntegerField("_old_committed");
+        oldUsedField = type.getCIntegerField("_old_used");
+    }
+
+    public long edenCommitted() {
+        return edenCommittedField.getValue(addr);
+    }
+
+    public long edenUsed() {
+        return edenUsedField.getValue(addr);
+    }
+
+    public long survivorCommitted() {
+        return survivorCommittedField.getValue(addr);
+    }
+
+    public long survivorUsed() {
+        return survivorUsedField.getValue(addr);
+    }
+
+    public long oldCommitted() {
+        return oldCommittedField.getValue(addr);
+    }
+
+    public long oldUsed() {
+        return oldUsedField.getValue(addr);
+    }
+
+    public G1MonitoringSupport(Address addr) {
+        super(addr);
+    }
+}
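
The getters above map one-to-one onto the VM-side G1MonitoringSupport size
fields. A short sketch (helper name is illustrative) of deriving young
generation figures the way the HeapSummary change later in this patch does,
since eden plus survivor make up G1's young generation:

    import sun.jvm.hotspot.gc_implementation.g1.G1MonitoringSupport;

    class G1YoungSummary {
        // Young generation = eden + survivor in G1.
        static void print(G1MonitoringSupport g1mm) {
            long used      = g1mm.edenUsed()      + g1mm.survivorUsed();
            long committed = g1mm.edenCommitted() + g1mm.survivorCommitted();
            System.out.println("young: used = " + used
                               + " / committed = " + committed + " bytes");
        }
    }
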
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Wed Jul 05 17:53:04 2017 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegion. Currently we don't actually include
+// any of its fields but only iterate over it (which we get "for free"
+// as HeapRegion ultimately inherits from ContiguousSpace).
+
+public class HeapRegion extends ContiguousSpace {
+    // static int GrainBytes;
+    static private CIntegerField grainBytesField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegion");
+
+        grainBytesField = type.getCIntegerField("GrainBytes");
+    }
+
+    static public long grainBytes() {
+        return grainBytesField.getValue();
+    }
+
+    public HeapRegion(Address addr) {
+        super(addr);
+    }
+}
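
Since GrainBytes is the fixed, VM-chosen size of every G1 region, the region
count times the grain size approximates the committed heap. A sketch (helper
name is illustrative):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;

    class RegionMath {
        // Every region spans exactly HeapRegion.grainBytes(), so this
        // approximates G1CollectedHeap.capacity().
        static long approxCapacity(G1CollectedHeap g1h) {
            return g1h.n_regions() * HeapRegion.grainBytes();
        }
    }
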
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Wed Jul 05 17:53:04 2017 +0200
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
+
+public class HeapRegionSeq extends VMObject {
+    // HeapRegion** _regions;
+    static private AddressField regionsField;
+    // size_t _length;
+    static private CIntegerField lengthField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionSeq");
+
+        regionsField = type.getAddressField("_regions");
+        lengthField = type.getCIntegerField("_length");
+    }
+
+    private HeapRegion at(long index) {
+        Address arrayAddr = regionsField.getValue(addr);
+        // Offset of &_regions[index]
+        long offset = index * VM.getVM().getAddressSize();
+        Address regionAddr = arrayAddr.getAddressAt(offset);
+        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
+                                                      regionAddr);
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    private class HeapRegionIterator implements Iterator<HeapRegion> {
+        private long index;
+        private long length;
+
+        @Override
+        public boolean hasNext() { return index < length; }
+
+        @Override
+        public HeapRegion next() { return at(index++);    }
+
+        @Override
+        public void remove()     { /* not supported */    }
+
+        HeapRegionIterator(Address addr) {
+            index = 0;
+            length = length();
+        }
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return new HeapRegionIterator(addr);
+    }
+
+    public HeapRegionSeq(Address addr) {
+        super(addr);
+    }
+}
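
The iterator resolves HeapRegion wrappers lazily from the native _regions
array via at(). A consuming sketch (helper name is illustrative; used() comes
from ContiguousSpace, which HeapRegion extends):

    import java.util.Iterator;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;

    class RegionWalk {
        // Sums used bytes over an Iterator<HeapRegion>, e.g. one produced
        // by HeapRegionSeq.heapRegionIterator().
        static long usedBytes(Iterator<HeapRegion> it) {
            long total = 0;
            while (it.hasNext()) {
                total += it.next().used();
            }
            return total;
        }
    }
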
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
   public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
   public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
   public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
+  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
   public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
 
   public String toString() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;
@@ -72,6 +73,7 @@
     heapConstructor = new VirtualConstructor(db);
     heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
     heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
+    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
 
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
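
The new mapping lets Universe wrap the native heap with the matching mirror
class by looking up the dynamic C++ type name of the CollectedHeap instance.
A sketch of the dispatch (the wrapper method is illustrative;
instantiateWrapperFor is the usual SA VirtualConstructor factory call):

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.gc_interface.CollectedHeap;
    import sun.jvm.hotspot.runtime.VirtualConstructor;

    class HeapDispatch {
        // Resolves the target's dynamic C++ type name (e.g. "G1CollectedHeap")
        // to the mirror class registered above and instantiates it.
        static CollectedHeap wrap(VirtualConstructor heapConstructor,
                                  Address heapAddr) {
            return (CollectedHeap) heapConstructor.instantiateWrapperFor(heapAddr);
        }
    }
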
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Wed Jul 05 17:53:04 2017 +0200
@@ -33,6 +33,7 @@
 
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
@@ -514,9 +515,16 @@
 
   private void addPermGenLiveRegions(List output, CollectedHeap heap) {
     LiveRegionsCollector lrc = new LiveRegionsCollector(output);
-    if (heap instanceof GenCollectedHeap) {
-       GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-       Generation gen = genHeap.permGen();
+    if (heap instanceof SharedHeap) {
+       if (Assert.ASSERTS_ENABLED) {
+          Assert.that(heap instanceof GenCollectedHeap ||
+                      heap instanceof G1CollectedHeap,
+                      "Expecting GenCollectedHeap or G1CollectedHeap, " +
+                      "but got " + heap.getClass().getName());
+       }
+       // Handles both GenCollectedHeap and G1CollectedHeap
+       SharedHeap sharedHeap = (SharedHeap) heap;
+       Generation gen = sharedHeap.permGen();
        gen.spaceIterate(lrc, true);
     } else if (heap instanceof ParallelScavengeHeap) {
        ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
@@ -524,8 +532,9 @@
        addLiveRegions(permGen.objectSpace().getLiveRegions(), output);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                             heap.getClass().getName());
+          Assert.that(false,
+                      "Expecting SharedHeap or ParallelScavengeHeap, " +
+                      "but got " + heap.getClass().getName());
        }
     }
   }
@@ -588,10 +597,14 @@
        addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
        PSOldGen oldGen = psh.oldGen();
        addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
+    } else if (heap instanceof G1CollectedHeap) {
+        G1CollectedHeap g1h = (G1CollectedHeap) heap;
+        g1h.heapRegionIterate(lrc);
     } else {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
-                              heap.getClass().getName());
+          Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " +
+                      "or ParallelScavengeHeap, but got " +
+                      heap.getClass().getName());
        }
     }
 
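
heapRegionIterate() drives a plain SpaceClosure over every region, which is
why the live-region collection above can treat G1 without any G1-specific
closure code. An illustrative closure (class name is an assumption):

    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.memory.Space;
    import sun.jvm.hotspot.memory.SpaceClosure;

    class RegionCounter {
        static long count(G1CollectedHeap g1h) {
            final long[] n = { 0 };
            g1h.heapRegionIterate(new SpaceClosure() {
                public void doSpace(Space s) {  // each HeapRegion arrives as a Space
                    n[0]++;
                }
            });
            return n[0];
        }
    }
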
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,11 @@
 
 import java.util.*;
 import sun.jvm.hotspot.gc_interface.*;
+import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.gc_implementation.shared.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.tools.*;
 
 public class HeapSummary extends Tool {
 
@@ -70,32 +70,45 @@
       System.out.println();
       System.out.println("Heap Usage:");
 
-      if (heap instanceof GenCollectedHeap) {
-         GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-         for (int n = 0; n < genHeap.nGens(); n++) {
-            Generation gen = genHeap.getGen(n);
-            if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
-               System.out.println("New Generation (Eden + 1 Survivor Space):");
-               printGen(gen);
+      if (heap instanceof SharedHeap) {
+         SharedHeap sharedHeap = (SharedHeap) heap;
+         if (sharedHeap instanceof GenCollectedHeap) {
+            GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
+            for (int n = 0; n < genHeap.nGens(); n++) {
+               Generation gen = genHeap.getGen(n);
+               if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
+                  System.out.println("New Generation (Eden + 1 Survivor Space):");
+                  printGen(gen);
 
-               ContiguousSpace eden = ((DefNewGeneration)gen).eden();
-               System.out.println("Eden Space:");
-               printSpace(eden);
+                  ContiguousSpace eden = ((DefNewGeneration)gen).eden();
+                  System.out.println("Eden Space:");
+                  printSpace(eden);
+
+                  ContiguousSpace from = ((DefNewGeneration)gen).from();
+                  System.out.println("From Space:");
+                  printSpace(from);
 
-               ContiguousSpace from = ((DefNewGeneration)gen).from();
-               System.out.println("From Space:");
-               printSpace(from);
-
-               ContiguousSpace to = ((DefNewGeneration)gen).to();
-               System.out.println("To Space:");
-               printSpace(to);
-            } else {
-               System.out.println(gen.name() + ":");
-               printGen(gen);
+                  ContiguousSpace to = ((DefNewGeneration)gen).to();
+                  System.out.println("To Space:");
+                  printSpace(to);
+               } else {
+                  System.out.println(gen.name() + ":");
+                  printGen(gen);
+               }
             }
+         } else if (sharedHeap instanceof G1CollectedHeap) {
+             G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
+             G1MonitoringSupport g1mm = g1h.g1mm();
+             System.out.println("G1 Young Generation");
+             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
+             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
+             printG1Space("To Space:", 0, 0);
+             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
+         } else {
+             throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
          }
-         // Perm generation
-         Generation permGen = genHeap.permGen();
+         // Perm generation shared by the above
+         Generation permGen = sharedHeap.permGen();
          System.out.println("Perm Generation:");
          printGen(permGen);
       } else if (heap instanceof ParallelScavengeHeap) {
@@ -119,7 +132,7 @@
          printValMB("free     = ", permFree);
          System.out.println(alignment + (double)permGen.used() * 100.0 / permGen.capacity() + "% used");
       } else {
-         throw new RuntimeException("unknown heap type : " + heap.getClass());
+         throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
       }
    }
 
@@ -151,6 +164,14 @@
           return;
        }
 
+       l = getFlagValue("UseG1GC", flagMap);
+       if (l == 1L) {
+           System.out.print("Garbage-First (G1) GC ");
+           l = getFlagValue("ParallelGCThreads", flagMap);
+           System.out.println("with " + l + " thread(s)");
+           return;
+       }
+
        System.out.println("Mark Sweep Compact GC");
    }
 
@@ -191,6 +212,16 @@
       System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
    }
 
+   private void printG1Space(String spaceName, long used, long capacity) {
+      long free = capacity - used;
+      System.out.println(spaceName);
+      printValMB("capacity = ", capacity);
+      printValMB("used     = ", used);
+      printValMB("free     = ", free);
+      double occPerc = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
+      System.out.println(alignment + occPerc + "% used");
+   }
+
    private static final double FACTOR = 1024*1024;
    private void printValMB(String title, long value) {
       if (value < 0) {
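
With the G1 branches above, running the tool against a -XX:+UseG1GC target now
prints the "Garbage-First (G1) GC" line plus eden/survivor/old figures instead
of throwing. A launch sketch (the wrapper class is illustrative; start()/stop()
are the standard sun.jvm.hotspot.tools.Tool entry points used by the other SA
tools):

    import sun.jvm.hotspot.tools.HeapSummary;

    public class RunHeapSummary {
        public static void main(String[] args) {
            // Attach to the target JVM, print the heap summary, detach.
            HeapSummary hs = new HeapSummary();
            hs.start(args);   // e.g. a single pid argument
            hs.stop();
        }
    }
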
--- a/hotspot/make/hotspot_version	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/make/hotspot_version	Wed Jul 05 17:53:04 2017 +0200
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=01
+HS_BUILD_NUMBER=02
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/sa.files	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/make/sa.files	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,7 @@
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \
--- a/hotspot/src/os/posix/launcher/launcher.script	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/os/posix/launcher/launcher.script	Wed Jul 05 17:53:04 2017 +0200
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -69,8 +69,8 @@
 #
 
 # Make sure the paths are fully specified, i.e. they must begin with /.
-SCRIPT=$(cd $(dirname $0) && pwd)/$(basename $0)
-RUNDIR=$(pwd)
+REL_MYDIR=`dirname $0`
+MYDIR=`cd $REL_MYDIR && pwd`
 
 # Look whether the user wants to run inside gdb
 case "$1" in
@@ -95,12 +95,9 @@
         ;;
 esac
 
-# Find out the absolute path to this script
-MYDIR=$(cd $(dirname $SCRIPT) && pwd)
-
 JDK=
 if [ "${ALT_JAVA_HOME}" = "" ]; then
-    source ${MYDIR}/jdkpath.sh
+    . ${MYDIR}/jdkpath.sh
 else 
     JDK=${ALT_JAVA_HOME%%/jre};
 fi
@@ -119,9 +116,6 @@
 JAVA_HOME=$JDK
 ARCH=@@LIBARCH@@
 
-# Find out the absolute path to this script
-MYDIR=$(cd $(dirname $SCRIPT) && pwd)
-
 SBP=${MYDIR}:${JRE}/lib/${ARCH}
 
 # Set up a suitable LD_LIBRARY_PATH
@@ -146,7 +140,7 @@
 fi
 
 GDBSRCDIR=$MYDIR
-BASEDIR=$(cd $MYDIR/../../.. && pwd)
+BASEDIR=`cd $MYDIR/../../.. && pwd`
 
 init_gdb() {
 # Create a gdb script in case we should run inside gdb
@@ -179,7 +173,7 @@
 	init_gdb
 # First find out what emacs version we're using, so that we can
 # use the new pretty GDB mode if emacs -version >= 22.1
-	case $($EMACS -version 2> /dev/null) in
+	case `$EMACS -version 2> /dev/null` in
 	    *GNU\ Emacs\ 2[23]*)
 	    emacs_gud_cmd="gdba"
 	    emacs_gud_args="--annotate=3"
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -2004,7 +2004,7 @@
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
 
   ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery();
+  ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
   ref_processor()->setup_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear.  If we are assuming the collection from an asynchronous
@@ -3490,8 +3490,8 @@
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
     checkpointRootsInitialWork(asynch);
-    rp->verify_no_references_recorded();
-    rp->enable_discovery(); // enable ("weak") refs discovery
+    // enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
   } else {
     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
@@ -3503,7 +3503,8 @@
            "ref discovery for this generation kind");
     // already have locks
     checkpointRootsInitialWork(asynch);
-    rp->enable_discovery(); // now enable ("weak") refs discovery
+    // now enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
     _collectorState = Marking;
   }
   SpecializationStats::print();
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -118,30 +118,6 @@
   }
 }
 
-// this is a bit expensive... but we expect that it should not be called
-// to often.
-void CSetChooserCache::remove(HeapRegion *hr) {
-  assert(_occupancy > 0, "cache should not be empty");
-  assert(hr->sort_index() < -1, "should already be in the cache");
-  int index = get_index(hr->sort_index());
-  assert(_cache[index] == hr, "index should be correct");
-  int next_index = trim_index(index + 1);
-  int last_index = trim_index(_first + _occupancy - 1);
-  while (index != last_index) {
-    assert(_cache[next_index] != NULL, "should not be null");
-    _cache[index] = _cache[next_index];
-    _cache[index]->set_sort_index(get_sort_index(index));
-
-    index = next_index;
-    next_index = trim_index(next_index+1);
-  }
-  assert(index == last_index, "should have reached the last one");
-  _cache[index] = NULL;
-  hr->set_sort_index(-1);
-  --_occupancy;
-  assert(verify(), "cache should be consistent");
-}
-
 static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
     if (hr2 == NULL) return 0;
@@ -197,43 +173,34 @@
   HeapRegion *prev = NULL;
   while (index < _numMarkedRegions) {
     HeapRegion *curr = _markedRegions.at(index++);
-    if (curr != NULL) {
-      int si = curr->sort_index();
-      guarantee(!curr->is_young(), "should not be young!");
-      guarantee(si > -1 && si == (index-1), "sort index invariant");
-      if (prev != NULL) {
-        guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
-      }
-      prev = curr;
+    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
+    int si = curr->sort_index();
+    guarantee(!curr->is_young(), "should not be young!");
+    guarantee(si > -1 && si == (index-1), "sort index invariant");
+    if (prev != NULL) {
+      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
     }
+    prev = curr;
   }
   return _cache.verify();
 }
 #endif
 
-bool
-CollectionSetChooser::addRegionToCache() {
-  assert(!_cache.is_full(), "cache should not be full");
-
-  HeapRegion *hr = NULL;
-  while (hr == NULL && _curMarkedIndex < _numMarkedRegions) {
-    hr = _markedRegions.at(_curMarkedIndex++);
-  }
-  if (hr == NULL)
-    return false;
-  assert(!hr->is_young(), "should not be young!");
-  assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
-  _markedRegions.at_put(hr->sort_index(), NULL);
-  _cache.insert(hr);
-  assert(!_cache.is_empty(), "cache should not be empty");
-  assert(verify(), "cache should be consistent");
-  return false;
-}
-
 void
 CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && addRegionToCache()) {
+  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
+    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+    assert(hr != NULL,
+           err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                   _curMarkedIndex));
+    _curMarkedIndex += 1;
+    assert(!hr->is_young(), "should not be young!");
+    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    _markedRegions.at_put(hr->sort_index(), NULL);
+    _cache.insert(hr);
+    assert(!_cache.is_empty(), "cache should not be empty");
   }
+  assert(verify(), "cache should be consistent");
 }
 
 void
@@ -334,20 +301,6 @@
   clearMarkedHeapRegions();
 }
 
-void CollectionSetChooser::removeRegion(HeapRegion *hr) {
-  int si = hr->sort_index();
-  assert(si == -1 || hr->is_marked(), "Sort index not valid.");
-  if (si > -1) {
-    assert(_markedRegions.at(si) == hr, "Sort index not valid." );
-    _markedRegions.at_put(si, NULL);
-  } else if (si < -1) {
-    assert(_cache.region_in_cache(hr), "should be in the cache");
-    _cache.remove(hr);
-    assert(hr->sort_index() == -1, "sort index invariant");
-  }
-  hr->set_sort_index(-1);
-}
-
 // if time_remaining < 0.0, then this method should try to return
 // a region, whether it fits within the remaining time or not
 HeapRegion*
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,26 @@
 #include "utilities/growableArray.hpp"
 
 // We need to sort heap regions by collection desirability.
+// This sorting is currently done in two "stages". An initial sort is
+// done following a cleanup pause as soon as all of the marked but
+// non-empty regions have been identified and the completely empty
+// ones reclaimed.
+// This gives us a global sort on a GC efficiency metric
+// based on predictive data available at that time. However,
+// any of these regions that are collected will only be collected
+// during a future GC pause, by which time it is possible that newer
+// data might allow us to revise and/or refine the earlier
+// pause predictions, leading to changes in expected gc efficiency
+// order. To somewhat mitigate this obsolescence, more so in the
+// case of regions towards the end of the list, which will be
+// picked later, these pre-sorted regions from the _markedRegions
+// array are not used as is, but a small prefix thereof is
+// insertion-sorted again into a small cache, based on more
+// recent remembered set information. Regions are then drawn
+// from this cache to construct the collection set at each
+// incremental GC.
+// This scheme and/or its implementation may be subject to
+// revision in the future.
 
 class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
@@ -37,8 +57,8 @@
   } PrivateConstants;
 
   HeapRegion*  _cache[CacheLength];
-  int          _occupancy; // number of region in cache
-  int          _first; // "first" region in the cache
+  int          _occupancy; // number of regions in cache
+  int          _first;     // (index of) "first" region in the cache
 
   // adding CacheLength to deal with negative values
   inline int trim_index(int index) {
@@ -62,7 +82,6 @@
   void clear(void);
   void insert(HeapRegion *hr);
   HeapRegion *remove_first(void);
-  void remove (HeapRegion *hr);
   inline HeapRegion *get_first(void) {
     return _cache[_first];
   }
@@ -102,7 +121,6 @@
 
   void sortMarkedHeapRegions();
   void fillCache();
-  bool addRegionToCache(void);
   void addMarkedHeapRegion(HeapRegion *hr);
 
   // Must be called before calls to getParMarkedHeapRegionChunk.
@@ -122,9 +140,6 @@
 
   void updateAfterFullCollection();
 
-  // Ensure that "hr" is not a member of the marked region array or the cache
-  void removeRegion(HeapRegion* hr);
-
   bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
 
   // Returns true if the used portion of "_markedRegions" is properly
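
The cache described in the new header comment is a small circular buffer,
insertion-sorted by predicted GC efficiency, that fillCache() drains the
pre-sorted _markedRegions prefix into. The trim_index idiom adds CacheLength
before taking the remainder so that stepping an index below zero still lands
in range. A sketch of that arithmetic in Java (CACHE_LENGTH is an illustrative
stand-in for the VM's own PrivateConstants value):

    class RingIndex {
        static final int CACHE_LENGTH = 16;  // illustrative stand-in

        // Normalizes any index in (-CACHE_LENGTH, 2*CACHE_LENGTH) into
        // [0, CACHE_LENGTH), mirroring the C++ trim_index above.
        static int trimIndex(int index) {
            return (index + CACHE_LENGTH) % CACHE_LENGTH;
        }
    }
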
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -818,10 +818,10 @@
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
 
-  // Start weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor();
-  rp->verify_no_references_recorded();
-  rp->enable_discovery(); // enable ("weak") refs discovery
+  // Start Concurrent Marking weak-reference discovery.
+  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  // enable ("weak") refs discovery
+  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -1133,6 +1133,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
+
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If a full collection has happened, we shouldn't do this.
@@ -1837,6 +1838,10 @@
   size_t cleaned_up_bytes = start_used_bytes - g1h->used();
   g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
 
+  // Clean up will have freed any regions completely full of garbage.
+  // Update the soft reference policy with the new heap occupancy.
+  Universe::update_heap_info_at_gc();
+
   // We need to make this be a "collection" so any collection pause that
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
@@ -2072,8 +2077,10 @@
   }
 };
 
-// Implementation of AbstractRefProcTaskExecutor for G1
-class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+// Implementation of AbstractRefProcTaskExecutor for parallel
+// reference processing at the end of G1 concurrent marking
+
+class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
@@ -2082,7 +2089,7 @@
   int              _active_workers;
 
 public:
-  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
+  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                         ConcurrentMark* cm,
                         CMBitMap* bitmap,
                         WorkGang* workers,
@@ -2096,7 +2103,7 @@
   virtual void execute(EnqueueTask& task);
 };
 
-class G1RefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask&     _proc_task;
   G1CollectedHeap* _g1h;
@@ -2104,7 +2111,7 @@
   CMBitMap*        _bitmap;
 
 public:
-  G1RefProcTaskProxy(ProcessTask& proc_task,
+  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                      G1CollectedHeap* g1h,
                      ConcurrentMark* cm,
                      CMBitMap* bitmap) :
@@ -2122,10 +2129,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
+void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
 
   // We need to reset the phase for each task execution so that
   // the termination protocol of CMTask::do_marking_step works.
@@ -2135,12 +2142,12 @@
   _g1h->set_par_threads(0);
 }
 
-class G1RefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
 public:
-  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task)
   { }
@@ -2150,10 +2157,10 @@
   }
 };
 
-void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
 
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
@@ -2163,71 +2170,84 @@
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark   hm;
-  G1CollectedHeap* g1h   = G1CollectedHeap::heap();
-  ReferenceProcessor* rp = g1h->ref_processor();
-
-  // See the comment in G1CollectedHeap::ref_processing_init()
-  // about how reference processing currently works in G1.
-
-  // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
-  assert(_markStack.isEmpty(), "mark stack should be empty");
-
-  G1CMIsAliveClosure   g1_is_alive(g1h);
-  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
-  G1CMDrainMarkingStackClosure
-    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
-  // We use the work gang from the G1CollectedHeap and we utilize all
-  // the worker threads.
-  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
-  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
-
-  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
-                                          g1h->workers(), active_workers);
-
-
-  if (rp->processing_is_mt()) {
-    // Set the degree of MT here.  If the discovery is done MT, there
-    // may have been a different number of threads doing the discovery
-    // and a different number of discovered lists may have Ref objects.
-    // That is OK as long as the Reference lists are balanced (see
-    // balance_all_queues() and balance_queues()).
-    rp->set_active_mt_degree(active_workers);
-
-    rp->process_discovered_references(&g1_is_alive,
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // Is alive closure.
+  G1CMIsAliveClosure g1_is_alive(g1h);
+
+  // Inner scope to exclude the cleaning of the string and symbol
+  // tables from the displayed time.
+  {
+    bool verbose = PrintGC && PrintGCDetails;
+    if (verbose) {
+      gclog_or_tty->put(' ');
+    }
+    TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
+
+    ReferenceProcessor* rp = g1h->ref_processor_cm();
+
+    // See the comment in G1CollectedHeap::ref_processing_init()
+    // about how reference processing currently works in G1.
+
+    // Process weak references.
+    rp->setup_policy(clear_all_soft_refs);
+    assert(_markStack.isEmpty(), "mark stack should be empty");
+
+    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
+    G1CMDrainMarkingStackClosure
+      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
+
+    // We use the work gang from the G1CollectedHeap and we utilize all
+    // the worker threads.
+    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
+    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+
+    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+                                              g1h->workers(), active_workers);
+
+    if (rp->processing_is_mt()) {
+      // Set the degree of MT here.  If the discovery is done MT, there
+      // may have been a different number of threads doing the discovery
+      // and a different number of discovered lists may have Ref objects.
+      // That is OK as long as the Reference lists are balanced (see
+      // balance_all_queues() and balance_queues()).
+      rp->set_active_mt_degree(active_workers);
+
+      rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
                                       &g1_drain_mark_stack,
                                       &par_task_executor);
 
-    // The work routines of the parallel keep_alive and drain_marking_stack
-    // will set the has_overflown flag if we overflow the global marking
-    // stack.
-  } else {
-    rp->process_discovered_references(&g1_is_alive,
-                                      &g1_keep_alive,
-                                      &g1_drain_mark_stack,
-                                      NULL);
-
+      // The work routines of the parallel keep_alive and drain_marking_stack
+      // will set the has_overflown flag if we overflow the global marking
+      // stack.
+    } else {
+      rp->process_discovered_references(&g1_is_alive,
+                                        &g1_keep_alive,
+                                        &g1_drain_mark_stack,
+                                        NULL);
+    }
+
+    assert(_markStack.overflow() || _markStack.isEmpty(),
+            "mark stack should be empty (unless it overflowed)");
+    if (_markStack.overflow()) {
+      // Should have been done already when we tried to push an
+      // entry on to the global mark stack. But let's do it again.
+      set_has_overflown();
+    }
+
+    if (rp->processing_is_mt()) {
+      assert(rp->num_q() == active_workers, "why not");
+      rp->enqueue_discovered_references(&par_task_executor);
+    } else {
+      rp->enqueue_discovered_references();
+    }
+
+    rp->verify_no_references_recorded();
+    assert(!rp->discovery_enabled(), "Post condition");
   }
 
-  assert(_markStack.overflow() || _markStack.isEmpty(),
-      "mark stack should be empty (unless it overflowed)");
-  if (_markStack.overflow()) {
-    // Should have been done already when we tried to push an
-    // entry on to the global mark stack. But let's do it again.
-    set_has_overflown();
-  }
-
-  if (rp->processing_is_mt()) {
-    assert(rp->num_q() == active_workers, "why not");
-    rp->enqueue_discovered_references(&par_task_executor);
-  } else {
-    rp->enqueue_discovered_references();
-  }
-
-  rp->verify_no_references_recorded();
-  assert(!rp->discovery_enabled(), "should have been disabled");
-
   // Now clean up stale oops in StringTable
   StringTable::unlink(&g1_is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -3329,7 +3349,7 @@
   assert(_ref_processor == NULL, "should be initialized to NULL");
 
   if (G1UseConcMarkReferenceProcessing) {
-    _ref_processor = g1h->ref_processor();
+    _ref_processor = g1h->ref_processor_cm();
     assert(_ref_processor != NULL, "should not be NULL");
   }
 }
@@ -4564,6 +4584,15 @@
                  G1PPRL_DOUBLE_H_FORMAT,
                  "type", "address-range",
                  "used", "prev-live", "next-live", "gc-eff");
+  _out->print_cr(G1PPRL_LINE_PREFIX
+                 G1PPRL_TYPE_H_FORMAT
+                 G1PPRL_ADDR_BASE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
+                 G1PPRL_DOUBLE_H_FORMAT,
+                 "", "",
+                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
@@ -4575,7 +4604,7 @@
   // The > 0 check is to deal with the prev and next live bytes which
   // could be 0.
   if (*hum_bytes > 0) {
-    bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes);
+    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
     *hum_bytes -= bytes;
   }
   return bytes;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -366,8 +366,8 @@
   friend class CMConcurrentMarkingTask;
   friend class G1ParNoteEndTask;
   friend class CalcLiveObjectsClosure;
-  friend class G1RefProcTaskProxy;
-  friend class G1RefProcTaskExecutor;
+  friend class G1CMRefProcTaskProxy;
+  friend class G1CMRefProcTaskExecutor;
   friend class G1CMParKeepAliveAndDrainClosure;
   friend class G1CMParDrainMarkingStackClosure;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -42,6 +42,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/aprofiler.hpp"
@@ -551,8 +552,7 @@
 }
 
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
-  assert(!isHumongous(word_size) ||
-                                  word_size <= (size_t) HeapRegion::GrainWords,
+  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
@@ -815,6 +815,11 @@
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
+
+    // A successful humongous object allocation changes the used space
+    // information of the old generation so we need to recalculate the
+    // sizes and update the jstat counters here.
+    g1mm()->update_sizes();
   }
 
   verify_region_sets_optional();
@@ -1164,7 +1169,7 @@
       if (!hr->isHumongous()) {
         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
       } else if (hr->startsHumongous()) {
-        if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
+        if (hr->capacity() == HeapRegion::GrainBytes) {
           // single humongous region
           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
         } else {
@@ -1244,15 +1249,11 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    // We want to discover references, but not process them yet.
-    // This mode is disabled in
-    // instanceRefKlass::process_discovered_references if the
-    // generation does some collection work, or
-    // instanceRefKlass::enqueue_discovered_references if the
-    // generation returns without doing any work.
-    ref_processor()->disable_discovery();
-    ref_processor()->abandon_partial_discovery();
-    ref_processor()->verify_no_references_recorded();
+    // Disable discovery and empty the discovered lists
+    // for the CM ref processor.
+    ref_processor_cm()->disable_discovery();
+    ref_processor_cm()->abandon_partial_discovery();
+    ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
     // refinement, if any are in progress.
@@ -1280,31 +1281,33 @@
     empty_young_list();
     g1_policy()->set_full_young_gcs(true);
 
-    // See the comment in G1CollectedHeap::ref_processing_init() about
+    // See the comments in g1CollectedHeap.hpp and
+    // G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
 
-    // Temporarily make reference _discovery_ single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
-
-    // Temporarily make refs discovery atomic
-    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
-
-    // Temporarily clear _is_alive_non_header
-    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(do_clear_all_soft_refs);
+    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
+
+    // Temporarily clear the STW ref processor's _is_alive_non_header field.
+    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
+
+    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
+
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
     }
+
     assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
 
-    ref_processor()->enqueue_discovered_references();
+    // Enqueue any discovered reference objects that have
+    // not been removed from the discovered lists.
+    ref_processor_stw()->enqueue_discovered_references();
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
@@ -1319,7 +1322,16 @@
                        /* option      */ VerifyOption_G1UsePrevMarking);
 
     }
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
+
+    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+    ref_processor_stw()->verify_no_references_recorded();
+
+    // Note: since we've just done a full GC, concurrent
+    // marking is no longer active. Therefore we need not
+    // re-enable reference discovery for the CM ref processor.
+    // That will be done at the start of the next marking cycle.
+    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+    ref_processor_cm()->verify_no_references_recorded();
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
@@ -1414,7 +1426,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
   post_full_gc_dump();
 
   return true;
@@ -1772,14 +1784,17 @@
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
-  _is_alive_closure(this),
-  _ref_processor(NULL),
+  _is_alive_closure_cm(this),
+  _is_alive_closure_stw(this),
+  _ref_processor_cm(NULL),
+  _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
   _cg1r(NULL), _summary_bytes_used(0),
+  _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _free_list("Master Free List"),
@@ -1955,7 +1970,7 @@
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
-  guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
+  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
@@ -2059,7 +2074,7 @@
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
-  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
+  _g1mm = new G1MonitoringSupport(this);
 
   return JNI_OK;
 }
@@ -2067,34 +2082,81 @@
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
-  // * There is only one reference processor instance that
-  //   'spans' the entire heap. It is created by the code
-  //   below.
-  // * Reference discovery is not enabled during an incremental
-  //   pause (see 6484982).
-  // * Discoverered refs are not enqueued nor are they processed
-  //   during an incremental pause (see 6484982).
-  // * Reference discovery is enabled at initial marking.
-  // * Reference discovery is disabled and the discovered
-  //   references processed etc during remarking.
-  // * Reference discovery is MT (see below).
-  // * Reference discovery requires a barrier (see below).
-  // * Reference processing is currently not MT (see 6608385).
-  // * A full GC enables (non-MT) reference discovery and
-  //   processes any discovered references.
+  // * There are two reference processor instances. One is
+  //   used to record and process discovered references
+  //   during concurrent marking; the other is used to
+  //   record and process references during STW pauses
+  //   (both full and incremental).
+  // * Both ref processors need to 'span' the entire heap as
+  //   the regions in the collection set may be dotted around.
+  //
+  // * For the concurrent marking ref processor:
+  //   * Reference discovery is enabled at initial marking.
+  //   * Reference discovery is disabled and the discovered
+  //     references processed etc during remarking.
+  //   * Reference discovery is MT (see below).
+  //   * Reference discovery requires a barrier (see below).
+  //   * Reference processing may or may not be MT
+  //     (depending on the value of ParallelRefProcEnabled
+  //     and ParallelGCThreads).
+  //   * A full GC disables reference discovery by the CM
+  //     ref processor and abandons any entries on its
+  //     discovered lists.
+  //
+  // * For the STW ref processor:
+  //   * Non-MT discovery is enabled at the start of a full GC.
+  //   * Processing and enqueueing during a full GC is non-MT.
+  //   * During a full GC, references are processed after marking.
+  //
+  //   * Discovery (may or may not be MT) is enabled at the start
+  //     of an incremental evacuation pause.
+  //   * References are processed near the end of a STW evacuation pause.
+  //   * For both types of GC:
+  //     * Discovery is atomic - i.e. not concurrent.
+  //     * Reference discovery will not need a barrier.
 
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
-  _ref_processor =
+
+  // Concurrent Mark ref processor
+  _ref_processor_cm =
     new ReferenceProcessor(mr,    // span
-                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
-                           (int) ParallelGCThreads,   // degree of mt processing
-                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
-                           false,                     // Reference discovery is not atomic
-                           &_is_alive_closure,        // is alive closure for efficiency
-                           true);                     // Setting next fields of discovered
-                                                      // lists requires a barrier.
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           (int) ParallelGCThreads,
+                                // degree of mt processing
+                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
+                                // mt discovery
+                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                                // degree of mt discovery
+                           false,
+                                // Reference discovery is not atomic
+                           &_is_alive_closure_cm,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           true);
+                                // Setting next fields of discovered
+                                // lists requires a barrier.
+
+  // STW ref processor
+  _ref_processor_stw =
+    new ReferenceProcessor(mr,    // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
+                                // mt processing
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt processing
+                           (ParallelGCThreads > 1),
+                                // mt discovery
+                           MAX2((int)ParallelGCThreads, 1),
+                                // degree of mt discovery
+                           true,
+                                // Reference discovery is atomic
+                           &_is_alive_closure_stw,
+                                // is alive closure
+                                // (for efficiency/performance)
+                           false);
+                                // Setting next fields of discovered
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -2988,8 +3050,7 @@
             _g1_storage.high(),
             _g1_storage.high_boundary());
   st->cr();
-  st->print("  region size " SIZE_FORMAT "K, ",
-            HeapRegion::GrainBytes/K);
+  st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   size_t young_regions = _young_list->length();
   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
             young_regions, young_regions * HeapRegion::GrainBytes / K);
@@ -3117,6 +3178,10 @@
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
   // always_do_update_barrier = true;
+
+  // We have just completed a GC. Update the soft reference
+  // policy with the new heap occupancy
+  Universe::update_heap_info_at_gc();
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
@@ -3298,6 +3363,14 @@
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
 
+    // We do not allow initial-mark to be piggy-backed on a
+    // partially-young GC.
+    assert(!g1_policy()->during_initial_mark_pause() ||
+            g1_policy()->full_young_gcs(), "sanity");
+
+    // We also do not allow partially-young GCs during marking.
+    assert(!mark_in_progress() || g1_policy()->full_young_gcs(), "sanity");
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->full_young_gcs()) {
@@ -3354,231 +3427,242 @@
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-      // Please see comment in G1CollectedHeap::ref_processing_init()
-      // to see how reference processing currently works in G1.
-      //
-      // We want to turn off ref discovery, if necessary, and turn it back on
-      // on again later if we do. XXX Dubious: why is discovery disabled?
-      bool was_enabled = ref_processor()->discovery_enabled();
-      if (was_enabled) ref_processor()->disable_discovery();
-
-      // Forget the current alloc region (we might even choose it to be part
-      // of the collection set!).
-      release_mutator_alloc_region();
-
-      // We should call this after we retire the mutator alloc
-      // region(s) so that all the ALLOC / RETIRE events are generated
-      // before the start GC event.
-      _hr_printer.start_gc(false /* full */, (size_t) total_collections());
-
-      // The elapsed time induced by the start time below deliberately elides
-      // the possible verification above.
-      double start_time_sec = os::elapsedTime();
-      size_t start_used_bytes = used();
+      // Please see comment in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() to see how
+      // reference processing currently works in G1.
+
+      // Enable discovery in the STW reference processor
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
+                                            true /*verify_no_refs*/);
+
+      {
+        // We want to temporarily turn off discovery by the
+        // CM ref processor, if necessary, and turn it back on
+        // again later if we do. Using a scoped
+        // NoRefDiscovery object will do this.
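+        // (Its constructor disables discovery if it was enabled,
+        // and its destructor re-enables it as this scope is exited.)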
+        NoRefDiscovery no_cm_discovery(ref_processor_cm());
+
+        // Forget the current alloc region (we might even choose it to be part
+        // of the collection set!).
+        release_mutator_alloc_region();
+
+        // We should call this after we retire the mutator alloc
+        // region(s) so that all the ALLOC / RETIRE events are generated
+        // before the start GC event.
+        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
+
+        // The elapsed time induced by the start time below deliberately elides
+        // the possible verification above.
+        double start_time_sec = os::elapsedTime();
+        size_t start_used_bytes = used();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->record_collection_pause_start(start_time_sec,
-                                                 start_used_bytes);
+        g1_policy()->record_collection_pause_start(start_time_sec,
+                                                   start_used_bytes);
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
-      _young_list->print();
+        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
+        _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPre();
-      }
-      perm_gen()->save_marks();
-
-      // We must do this before any possible evacuation that should propagate
-      // marks.
-      if (mark_in_progress()) {
-        double start_time_sec = os::elapsedTime();
-
-        _cm->drainAllSATBBuffers();
-        double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
-        g1_policy()->record_satb_drain_time(finish_mark_ms);
-      }
-      // Record the number of elements currently on the mark stack, so we
-      // only iterate over these.  (Since evacuation may add to the mark
-      // stack, doing more exposes race conditions.)  If no mark is in
-      // progress, this will be zero.
-      _cm->set_oops_do_bound();
-
-      if (mark_in_progress()) {
-        concurrent_mark()->newCSet();
-      }
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPre();
+        }
+        perm_gen()->save_marks();
+
+        // We must do this before any possible evacuation that should propagate
+        // marks.
+        if (mark_in_progress()) {
+          double start_time_sec = os::elapsedTime();
+
+          _cm->drainAllSATBBuffers();
+          double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
+          g1_policy()->record_satb_drain_time(finish_mark_ms);
+        }
+        // Record the number of elements currently on the mark stack, so we
+        // only iterate over these.  (Since evacuation may add to the mark
+        // stack, doing more exposes race conditions.)  If no mark is in
+        // progress, this will be zero.
+        _cm->set_oops_do_bound();
+
+        if (mark_in_progress()) {
+          concurrent_mark()->newCSet();
+        }
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->choose_collection_set(target_pause_time_ms);
-
-      if (_hr_printer.is_active()) {
-        HeapRegion* hr = g1_policy()->collection_set();
-        while (hr != NULL) {
-          G1HRPrinter::RegionType type;
-          if (!hr->is_young()) {
-            type = G1HRPrinter::Old;
-          } else if (hr->is_survivor()) {
-            type = G1HRPrinter::Survivor;
-          } else {
-            type = G1HRPrinter::Eden;
+        g1_policy()->choose_collection_set(target_pause_time_ms);
+
+        if (_hr_printer.is_active()) {
+          HeapRegion* hr = g1_policy()->collection_set();
+          while (hr != NULL) {
+            G1HRPrinter::RegionType type;
+            if (!hr->is_young()) {
+              type = G1HRPrinter::Old;
+            } else if (hr->is_survivor()) {
+              type = G1HRPrinter::Survivor;
+            } else {
+              type = G1HRPrinter::Eden;
+            }
+            _hr_printer.cset(hr);
+            hr = hr->next_in_collection_set();
           }
-          _hr_printer.cset(hr);
-          hr = hr->next_in_collection_set();
         }
-      }
-
-      // We have chosen the complete collection set. If marking is
-      // active then, we clear the region fields of any of the
-      // concurrent marking tasks whose region fields point into
-      // the collection set as these values will become stale. This
-      // will cause the owning marking threads to claim a new region
-      // when marking restarts.
-      if (mark_in_progress()) {
-        concurrent_mark()->reset_active_task_region_fields_in_cset();
-      }
+
+        // We have chosen the complete collection set. If marking is
+        // active then, we clear the region fields of any of the
+        // concurrent marking tasks whose region fields point into
+        // the collection set as these values will become stale. This
+        // will cause the owning marking threads to claim a new region
+        // when marking restarts.
+        if (mark_in_progress()) {
+          concurrent_mark()->reset_active_task_region_fields_in_cset();
+        }
 
 #ifdef ASSERT
-      VerifyCSetClosure cl;
-      collection_set_iterate(&cl);
+        VerifyCSetClosure cl;
+        collection_set_iterate(&cl);
 #endif // ASSERT
 
-      setup_surviving_young_words();
-
-      // Initialize the GC alloc regions.
-      init_gc_alloc_regions();
-
-      // Actually do the work...
-      evacuate_collection_set();
-
-      free_collection_set(g1_policy()->collection_set());
-      g1_policy()->clear_collection_set();
-
-      cleanup_surviving_young_words();
-
-      // Start a new incremental collection set for the next pause.
-      g1_policy()->start_incremental_cset_building();
-
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
-      clear_cset_fast_test();
-
-      _young_list->reset_sampled_info();
-
-      // Don't check the whole heap at this point as the
-      // GC alloc regions from this pause have been tagged
-      // as survivors and moved on to the survivor list.
-      // Survivor regions will fail the !is_young() check.
-      assert(check_young_list_empty(false /* check_heap */),
-        "young list should be empty");
+        setup_surviving_young_words();
+
+        // Initialize the GC alloc regions.
+        init_gc_alloc_regions();
+
+        // Actually do the work...
+        evacuate_collection_set();
+
+        free_collection_set(g1_policy()->collection_set());
+        g1_policy()->clear_collection_set();
+
+        cleanup_surviving_young_words();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
+        _young_list->reset_sampled_info();
+
+        // Don't check the whole heap at this point as the
+        // GC alloc regions from this pause have been tagged
+        // as survivors and moved on to the survivor list.
+        // Survivor regions will fail the !is_young() check.
+        assert(check_young_list_empty(false /* check_heap */),
+          "young list should be empty");
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
-      _young_list->print();
+        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
+        _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      g1_policy()->record_survivor_regions(_young_list->survivor_length(),
-        _young_list->first_survivor_region(),
-        _young_list->last_survivor_region());
-
-      _young_list->reset_auxilary_lists();
-
-      if (evacuation_failed()) {
-        _summary_bytes_used = recalculate_used();
-      } else {
-        // The "used" of the the collection set have already been subtracted
-        // when they were freed.  Add in the bytes evacuated.
-        _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
-      }
-
-      if (g1_policy()->during_initial_mark_pause()) {
-        concurrent_mark()->checkpointRootsInitialPost();
-        set_marking_started();
-        // CAUTION: after the doConcurrentMark() call below,
-        // the concurrent marking thread(s) could be running
-        // concurrently with us. Make sure that anything after
-        // this point does not assume that we are the only GC thread
-        // running. Note: of course, the actual marking work will
-        // not start until the safepoint itself is released in
-        // ConcurrentGCThread::safepoint_desynchronize().
-        doConcurrentMark();
-      }
-
-      allocate_dummy_regions();
+        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
+                                            _young_list->first_survivor_region(),
+                                            _young_list->last_survivor_region());
+
+        _young_list->reset_auxilary_lists();
+
+        if (evacuation_failed()) {
+          _summary_bytes_used = recalculate_used();
+        } else {
+          // The "used" of the collection set has already been subtracted
+          // when the regions were freed.  Add in the bytes evacuated.
+          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+        }
+
+        if (g1_policy()->during_initial_mark_pause()) {
+          concurrent_mark()->checkpointRootsInitialPost();
+          set_marking_started();
+          // CAUTION: after the doConcurrentMark() call below,
+          // the concurrent marking thread(s) could be running
+          // concurrently with us. Make sure that anything after
+          // this point does not assume that we are the only GC thread
+          // running. Note: of course, the actual marking work will
+          // not start until the safepoint itself is released in
+          // ConcurrentGCThread::safepoint_desynchronize().
+          doConcurrentMark();
+        }
+
+        allocate_dummy_regions();
 
 #if YOUNG_LIST_VERBOSE
-      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
-      _young_list->print();
-      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
+        _young_list->print();
+        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-      init_mutator_alloc_region();
-
-      {
-        size_t expand_bytes = g1_policy()->expansion_amount();
-        if (expand_bytes > 0) {
-          size_t bytes_before = capacity();
-          if (!expand(expand_bytes)) {
-            // We failed to expand the heap so let's verify that
-            // committed/uncommitted amount match the backing store
-            assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-            assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+        init_mutator_alloc_region();
+
+        {
+          size_t expand_bytes = g1_policy()->expansion_amount();
+          if (expand_bytes > 0) {
+            size_t bytes_before = capacity();
+            if (!expand(expand_bytes)) {
+              // We failed to expand the heap so let's verify that
+              // committed/uncommitted amount match the backing store
+              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+            }
           }
         }
+
+        double end_time_sec = os::elapsedTime();
+        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
+        g1_policy()->record_pause_time_ms(pause_time_ms);
+        g1_policy()->record_collection_pause_end();
+
+        MemoryService::track_memory_usage();
+
+        // In prepare_for_verify() below we'll need to scan the deferred
+        // update buffers to bring the RSets up-to-date if
+        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
+        // the update buffers we'll probably need to scan cards on the
+        // regions we just allocated to (i.e., the GC alloc
+        // regions). However, during the last GC we called
+        // set_saved_mark() on all the GC alloc regions, so card
+        // scanning might skip the [saved_mark_word()...top()] area of
+        // those regions (i.e., the area we allocated objects into
+        // during the last GC). But it shouldn't. Given that
+        // saved_mark_word() is conditional on whether the GC time stamp
+        // on the region is current or not, by incrementing the GC time
+        // stamp here we invalidate all the GC time stamps on all the
+        // regions and saved_mark_word() will simply return top() for
+        // all the regions. This is a nicer way of ensuring this rather
+        // than iterating over the regions and fixing them. In fact, the
+        // GC time stamp increment here also ensures that
+        // saved_mark_word() will return top() between pauses, i.e.,
+        // during concurrent refinement. So we don't need the
+        // is_gc_active() check to decide which top to use when
+        // scanning cards (see CR 7039627).
+        increment_gc_time_stamp();
+
+        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
+          HandleMark hm;  // Discard invalid handles created during verification
+          gclog_or_tty->print(" VerifyAfterGC:");
+          prepare_for_verify();
+          Universe::verify(/* allow dirty */ true,
+                           /* silent      */ false,
+                           /* option      */ VerifyOption_G1UsePrevMarking);
+        }
+
+        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+        ref_processor_stw()->verify_no_references_recorded();
+
+        // CM reference discovery, if it was enabled on entry, is
+        // re-enabled by the NoRefDiscovery destructor as this
+        // scope exits.
       }
 
-      double end_time_sec = os::elapsedTime();
-      double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
-      g1_policy()->record_pause_time_ms(pause_time_ms);
-      g1_policy()->record_collection_pause_end();
-
-      MemoryService::track_memory_usage();
-
-      // In prepare_for_verify() below we'll need to scan the deferred
-      // update buffers to bring the RSets up-to-date if
-      // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
-      // the update buffers we'll probably need to scan cards on the
-      // regions we just allocated to (i.e., the GC alloc
-      // regions). However, during the last GC we called
-      // set_saved_mark() on all the GC alloc regions, so card
-      // scanning might skip the [saved_mark_word()...top()] area of
-      // those regions (i.e., the area we allocated objects into
-      // during the last GC). But it shouldn't. Given that
-      // saved_mark_word() is conditional on whether the GC time stamp
-      // on the region is current or not, by incrementing the GC time
-      // stamp here we invalidate all the GC time stamps on all the
-      // regions and saved_mark_word() will simply return top() for
-      // all the regions. This is a nicer way of ensuring this rather
-      // than iterating over the regions and fixing them. In fact, the
-      // GC time stamp increment here also ensures that
-      // saved_mark_word() will return top() between pauses, i.e.,
-      // during concurrent refinement. So we don't need the
-      // is_gc_active() check to decided which top to use when
-      // scanning cards (see CR 7039627).
-      increment_gc_time_stamp();
-
-      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        gclog_or_tty->print(" VerifyAfterGC:");
-        prepare_for_verify();
-        Universe::verify(/* allow dirty */ true,
-                         /* silent      */ false,
-                         /* option      */ VerifyOption_G1UsePrevMarking);
-      }
-
-      if (was_enabled) ref_processor()->enable_discovery();
-
       {
         size_t expand_bytes = g1_policy()->expansion_amount();
         if (expand_bytes > 0) {
@@ -3630,7 +3714,7 @@
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
-  g1mm()->update_counters();
+  g1mm()->update_sizes();
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3728,34 +3812,6 @@
   _evac_failure_scan_stack = NULL;
 }
 
-// *** Sequential G1 Evacuation
-
-class G1IsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
-  bool do_object_b(oop p) {
-    // It is reachable if it is outside the collection set, or is inside
-    // and forwarded.
-    return !_g1->obj_in_cs(p) || p->is_forwarded();
-  }
-};
-
-class G1KeepAliveClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
-    oop obj = *p;
-    if (_g1->obj_in_cs(obj)) {
-      assert( obj->is_forwarded(), "invariant" );
-      *p = obj->forwardee();
-    }
-  }
-};
-
 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
 private:
   G1CollectedHeap* _g1;
@@ -3946,7 +4002,8 @@
 
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
-                                               oop old) {
+                                               oop old,
+                                               bool should_mark_root) {
   assert(obj_in_cs(old),
          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
                  (HeapWord*) old));
@@ -3954,6 +4011,16 @@
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
     // Forward-to-self succeeded.
+
+    // should_mark_root will be true when this routine is called
+    // from a root scanning closure during an initial mark pause.
+    // In this case the thread that succeeds in self-forwarding the
+    // object is also responsible for marking the object.
+    if (should_mark_root) {
+      assert(!oopDesc::is_null(old), "shouldn't be");
+      _cm->grayRoot(old);
+    }
+
     if (_evac_failure_closure != cl) {
       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
       assert(!_drain_in_progress,
@@ -4175,12 +4242,17 @@
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
+  assert(_evac_cl != NULL, "not set");
+  assert(_evac_failure_cl != NULL, "not set");
+  assert(_partial_scan_cl != NULL, "not set");
+
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
     while (refs()->pop_overflow(ref)) {
       deal_with_reference(ref);
     }
+
     while (refs()->pop_local(ref)) {
       deal_with_reference(ref);
     }
@@ -4208,7 +4280,8 @@
   }
 }
 
-oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_copy) {
+oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root,
+                                                     bool should_mark_copy) {
   size_t    word_sz = old->size();
   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
@@ -4228,7 +4301,7 @@
     // This will either forward-to-self, or detect that someone else has
     // installed a forwarding pointer.
     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
-    return _g1->handle_evacuation_failure_par(cl, old);
+    return _g1->handle_evacuation_failure_par(cl, old, should_mark_root);
   }
 
   // We're going to allocate linearly, so might as well prefetch ahead.
@@ -4330,11 +4403,26 @@
       // we also need to handle marking of roots in the
       // event of an evacuation failure. In the event of an
       // evacuation failure, the object is forwarded to itself
-      // and not copied so let's mark it here.
+      // and not copied. For root-scanning closures, the
+      // object would be marked after a successful self-forward
+      // but an object could be pointed to by both a root and a
+      // non-root location and be self-forwarded by a non-root-scanning
+      // closure. Therefore we also have to attempt to mark the
+      // self-forwarded root object here.
       if (do_mark_object && obj->forwardee() == obj) {
         mark_object(p);
       }
     } else {
+      // During an initial mark pause, objects that are pointed to
+      // by the roots need to be marked - even in the event of an
+      // evacuation failure. We pass the template parameter
+      // do_mark_object (which is true for root scanning closures
+      // during an initial mark pause) to copy_to_survivor_space
+      // which will pass it on to the evacuation failure handling
+      // code. The thread that successfully self-forwards a root
+      // object to itself is responsible for marking the object.
+      bool should_mark_root = do_mark_object;
+
       // We need to mark the copied object if we're a root scanning
       // closure during an initial mark pause (i.e. do_mark_object
       // will be true), or the object is already marked and we need
@@ -4343,7 +4431,8 @@
                               _during_initial_mark ||
                               (_mark_in_progress && !_g1->is_obj_ill(obj));
 
-      oop copy_oop = copy_to_survivor_space(obj, should_mark_copy);
+      oop copy_oop = copy_to_survivor_space(obj, should_mark_root,
+                                                 should_mark_copy);
       oopDesc::encode_store_heap_oop(p, copy_oop);
     }
     // When scanning the RS, we only care about objs in CS.
@@ -4501,35 +4590,34 @@
     ResourceMark rm;
     HandleMark   hm;
 
+    ReferenceProcessor*             rp = _g1h->ref_processor_stw();
+
     G1ParScanThreadState            pss(_g1h, i);
-    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
     pss.set_partial_scan_closure(&partial_scan_cl);
 
-    G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
-    G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
-    G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
-    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-    G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
-    G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
-    G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
-
-    OopsInHeapRegionClosure        *scan_root_cl;
-    OopsInHeapRegionClosure        *scan_perm_cl;
+    G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
+    G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
+
+    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
+    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
+
+    OopClosure*                    scan_root_cl = &only_scan_root_cl;
+    OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
 
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-    } else {
-      scan_root_cl = &only_scan_root_cl;
-      scan_perm_cl = &only_scan_perm_cl;
     }
 
+    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
+
     pss.start_strong_roots();
     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                   SharedHeap::SO_AllClasses,
@@ -4577,6 +4665,7 @@
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
+
   // First scan the strong roots, including the perm gen.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
@@ -4595,12 +4684,13 @@
                        &eager_scan_code_roots,
                        &buf_scan_perm);
 
-  // Now the ref_processor roots.
+  // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
-    // We need to treat the discovered reference lists as roots and
-    // keep entries (which are added by the marking threads) on them
-    // live until they can be processed at the end of marking.
-    ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
+    // We need to treat the discovered reference lists of the
+    // concurrent mark ref processor as roots and keep entries
+    // (which are added by the marking threads) on them live
+    // until they can be processed at the end of marking.
+    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
@@ -4641,6 +4731,524 @@
   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
 }
 
+// Weak Reference Processing support
+
+// An always "is_alive" closure that is used to preserve referents.
+// If the object is non-null then it's alive.  Used in the preservation
+// of referent objects that are pointed to by reference objects
+// discovered by the CM ref processor.
+class G1AlwaysAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p) {
+    return p != NULL;
+  }
+};
+
+bool G1STWIsAliveClosure::do_object_b(oop p) {
+  // An object is reachable if it is outside the collection set,
+  // or is inside and has been forwarded (copied, or self-forwarded
+  // after an evacuation failure).
+  return !_g1->obj_in_cs(p) || p->is_forwarded();
+}
+
+// Non Copying Keep Alive closure
+class G1KeepAliveClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
+  void do_oop(      oop* p) {
+    oop obj = *p;
+
+    if (_g1->obj_in_cs(obj)) {
+      assert( obj->is_forwarded(), "invariant" );
+      *p = obj->forwardee();
+    }
+  }
+};
+
+// Copying Keep Alive closure - can be called from both
+// serial and parallel code as long as different worker
+// threads utilize different G1ParScanThreadState instances
+// and different queues.
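+//
+// Instances are created per worker thread (or once, for the serial
+// case) and handed to the reference processor as the keep_alive
+// closure in process_discovered_references() below.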
+
+class G1CopyingKeepAliveClosure: public OopClosure {
+  G1CollectedHeap*         _g1h;
+  OopClosure*              _copy_non_heap_obj_cl;
+  OopsInHeapRegionClosure* _copy_perm_obj_cl;
+  G1ParScanThreadState*    _par_scan_state;
+
+public:
+  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
+                            OopClosure* non_heap_obj_cl,
+                            OopsInHeapRegionClosure* perm_obj_cl,
+                            G1ParScanThreadState* pss):
+    _g1h(g1h),
+    _copy_non_heap_obj_cl(non_heap_obj_cl),
+    _copy_perm_obj_cl(perm_obj_cl),
+    _par_scan_state(pss)
+  {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+
+    if (_g1h->obj_in_cs(obj)) {
+      // If the referent object has been forwarded (either copied
+      // to a new location or to itself in the event of an
+      // evacuation failure) then we need to update the reference
+      // field and, if both reference and referent are in the G1
+      // heap, update the RSet for the referent.
+      //
+      // If the referent has not been forwarded then we have to keep
+      // it alive by policy. Therefore we have to copy the referent.
+      //
+      // If the reference field is in the G1 heap then we can push
+      // on the PSS queue. When the queue is drained (after each
+      // phase of reference processing) the object and its followers
+      // will be copied, the reference field set to point to the
+      // new location, and the RSet updated. Otherwise we need to
+      // use the non-heap or perm closures directly to copy
+      // the referent object and update the pointer, while avoiding
+      // updating the RSet.
+
+      if (_g1h->is_in_g1_reserved(p)) {
+        _par_scan_state->push_on_queue(p);
+      } else {
+        // The reference field is not in the G1 heap.
+        if (_g1h->perm_gen()->is_in(p)) {
+          _copy_perm_obj_cl->do_oop(p);
+        } else {
+          _copy_non_heap_obj_cl->do_oop(p);
+        }
+      }
+    }
+  }
+};
+
+// Serial drain queue closure. Called as the 'complete_gc'
+// closure for each discovered list in some of the
+// reference processing phases.
+
+class G1STWDrainQueueClosure: public VoidClosure {
+protected:
+  G1CollectedHeap* _g1h;
+  G1ParScanThreadState* _par_scan_state;
+
+  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
+
+public:
+  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
+    _g1h(g1h),
+    _par_scan_state(pss)
+  { }
+
+  void do_void() {
+    G1ParScanThreadState* const pss = par_scan_state();
+    pss->trim_queue();
+  }
+};
+
+// Parallel Reference Processing closures
+
+// Implementation of AbstractRefProcTaskExecutor for parallel reference
+// processing during G1 evacuation pauses.
+
+class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+private:
+  G1CollectedHeap*   _g1h;
+  RefToScanQueueSet* _queues;
+  WorkGang*          _workers;
+  int                _active_workers;
+
+public:
+  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
+                        WorkGang* workers,
+                        RefToScanQueueSet *task_queues,
+                        int n_workers) :
+    _g1h(g1h),
+    _queues(task_queues),
+    _workers(workers),
+    _active_workers(n_workers)
+  {
+    assert(n_workers > 0, "shouldn't call this otherwise");
+  }
+
+  // Executes the given task using the heap's parallel worker threads.
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+};
+
+// Gang task for possibly parallel reference processing
+
+class G1STWRefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+  ProcessTask&     _proc_task;
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet *_task_queues;
+  ParallelTaskTerminator* _terminator;
+
+public:
+  G1STWRefProcTaskProxy(ProcessTask& proc_task,
+                     G1CollectedHeap* g1h,
+                     RefToScanQueueSet *task_queues,
+                     ParallelTaskTerminator* terminator) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task),
+    _g1h(g1h),
+    _task_queues(task_queues),
+    _terminator(terminator)
+  {}
+
+  virtual void work(int i) {
+    // The reference processing task executed by a single worker.
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1STWIsAliveClosure is_alive(_g1h);
+
+    G1ParScanThreadState pss(_g1h, i);
+
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Keep alive closure.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    // Complete GC closure
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
+
+    // Call the reference processing task's work routine.
+    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+
+    // Note we cannot assert that the refs array is empty here as not all
+    // of the processing tasks (specifically phase2 - pp2_work) execute
+    // the complete_gc closure (which ordinarily would drain the queue) so
+    // the queue may not be empty.
+  }
+};
+
+// Driver routine for parallel reference processing.
+// Creates an instance of the ref processing gang
+// task and has the worker threads execute it.
+void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  ParallelTaskTerminator terminator(_active_workers, _queues);
+  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&proc_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// Gang task for parallel reference enqueueing.
+
+class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _enq_task;
+
+public:
+  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
+    AbstractGangTask("Enqueue reference objects in parallel"),
+    _enq_task(enq_task)
+  { }
+
+  virtual void work(int i) {
+    _enq_task.work(i);
+  }
+};
+
+// Driver routine for parallel reference enqueueing.
+// Creates an instance of the ref enqueueing gang
+// task and has the worker threads execute it.
+
+void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&enq_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+// End of weak reference support closures
+
+// Abstract task used to preserve (i.e. copy) any referent objects
+// that are in the collection set and are pointed to by reference
+// objects discovered by the CM ref processor.
+
+class G1ParPreserveCMReferentsTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  RefToScanQueueSet      *_queues;
+  ParallelTaskTerminator _terminator;
+  int _n_workers;
+
+public:
+  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
+    AbstractGangTask("ParPreserveCMReferents"),
+    _g1h(g1h),
+    _queues(task_queues),
+    _terminator(workers, _queues),
+    _n_workers(workers)
+  { }
+
+  void work(int i) {
+    ResourceMark rm;
+    HandleMark   hm;
+
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
+
+    pss.set_evac_closure(&scan_evac_cl);
+    pss.set_evac_failure_closure(&evac_failure_cl);
+    pss.set_partial_scan_closure(&partial_scan_cl);
+
+    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
+    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
+
+    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+      // We also need to mark copied objects.
+      copy_non_heap_cl = &copy_mark_non_heap_cl;
+      copy_perm_cl = &copy_mark_perm_cl;
+    }
+
+    // Is alive closure
+    G1AlwaysAliveClosure always_alive(_g1h);
+
+    // Copying keep alive closure. Applied to referent objects that need
+    // to be copied.
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
+
+    ReferenceProcessor* rp = _g1h->ref_processor_cm();
+
+    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    int stride = MIN2(MAX2(_n_workers, 1), limit);
+
+    // limit is set using max_num_q() - which was set using ParallelGCThreads.
+    // So this must be true - but assert just in case someone decides to
+    // change the worker ids.
+    assert(0 <= i && i < limit, "sanity");
+    assert(!rp->discovery_is_atomic(), "check this code");
+
+    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
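+    // (discovered_soft_refs() is the base of the single contiguous
+    // array that holds the discovered lists of all the reference
+    // subclasses - soft, weak, final and phantom - which is what
+    // makes this flat indexing valid.)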
+    for (int idx = i; idx < limit; idx += stride) {
+      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
+
+      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
+      while (iter.has_next()) {
+        // Since discovery is not atomic for the CM ref processor, we
+        // can see some null referent objects.
+        iter.load_ptrs(DEBUG_ONLY(true));
+        oop ref = iter.obj();
+
+        // This will filter nulls.
+        if (iter.is_referent_alive()) {
+          iter.make_referent_alive();
+        }
+        iter.move_to_next();
+      }
+    }
+
+    // Drain the queue - which may cause stealing
+    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
+    drain_queue.do_void();
+    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
+    assert(pss.refs()->is_empty(), "should be");
+  }
+};
+
+// Weak Reference processing during an evacuation pause (part 1).
+void G1CollectedHeap::process_discovered_references() {
+  double ref_proc_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(rp->discovery_enabled(), "should have been enabled");
+
+  // Any reference objects, in the collection set, that were 'discovered'
+  // by the CM ref processor should have already been copied (either by
+  // applying the external root copy closure to the discovered lists, or
+  // by following an RSet entry).
+  //
+  // But some of the referents, that are in the collection set, that these
+  // reference objects point to may not have been copied: the STW ref
+  // processor would have seen that the reference object had already
+  // been 'discovered' and would have skipped discovering the reference,
+  // but would not have treated the reference object as a regular oop.
+  // As a result the copy closure would not have been applied to the
+  // referent object.
+  //
+  // We need to explicitly copy these referent objects - the references
+  // will be processed at the end of remarking.
+  //
+  // We also need to do this copying before we process the reference
+  // objects discovered by the STW ref processor in case one of these
+  // referents points to another object which is also referenced by an
+  // object discovered by the STW ref processor.
+
+  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                        workers()->total_workers() : 1);
+
+  set_par_threads(n_workers);
+  G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    workers()->run_task(&keep_cm_referents);
+  } else {
+    keep_cm_referents.work(0);
+  }
+
+  set_par_threads(0);
+
+  // Closure to test whether a referent is alive.
+  G1STWIsAliveClosure is_alive(this);
+
+  // Even when parallel reference processing is enabled, the processing
+  // of JNI refs is serial and performed serially by the current thread
+  // rather than by a worker. The following PSS will be used for processing
+  // JNI refs.
+
+  // Use only a single queue for this PSS.
+  G1ParScanThreadState pss(this, 0);
+
+  // We do not embed a reference processor in the copying/scanning
+  // closures while we're actually processing the discovered
+  // reference objects.
+  G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
+  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
+  G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
+
+  pss.set_evac_closure(&scan_evac_cl);
+  pss.set_evac_failure_closure(&evac_failure_cl);
+  pss.set_partial_scan_closure(&partial_scan_cl);
+
+  assert(pss.refs()->is_empty(), "pre-condition");
+
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
+  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
+  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
+
+  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
+  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
+
+  if (g1_policy()->during_initial_mark_pause()) {
+    // We also need to mark copied objects.
+    copy_non_heap_cl = &copy_mark_non_heap_cl;
+    copy_perm_cl = &copy_mark_perm_cl;
+  }
+
+  // Keep alive closure.
+  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
+
+  // Serial Complete GC closure
+  G1STWDrainQueueClosure drain_queue(this, &pss);
+
+  // Setup the soft refs policy...
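+  // ('false' means we do not unconditionally clear soft references;
+  // the currently installed soft ref clearing policy decides
+  // whether they are cleared.)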
+  rp->setup_policy(false);
+
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->process_discovered_references(&is_alive,
+                                      &keep_alive,
+                                      &drain_queue,
+                                      NULL);
+  } else {
+    // Parallel reference processing
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
+  }
+
+  // We have completed copying any necessary live referent objects
+  // (that were not copied during the actual pause) so we can
+  // retire any active alloc buffers
+  pss.retire_alloc_buffers();
+  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+  double ref_proc_time = os::elapsedTime() - ref_proc_start;
+  g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
+}
+
+// Weak Reference processing during an evacuation pause (part 2).
+void G1CollectedHeap::enqueue_discovered_references() {
+  double ref_enq_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
+
+  // Now enqueue any remaining on the discovered lists on to
+  // the pending list.
+  if (!rp->processing_is_mt()) {
+    // Serial reference processing...
+    rp->enqueue_discovered_references();
+  } else {
+    // Parallel reference enqueueing
+
+    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
+    assert(rp->num_q() == active_workers, "sanity");
+    assert(active_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+    rp->enqueue_discovered_references(&par_task_executor);
+  }
+
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "should have been disabled");
+
+  // FIXME
+  // CM's reference processing also cleans up the string and symbol tables.
+  // Should we do that here also? We could, but it is a serial operation
+  // and could significantly increase the pause time.
+
+  double ref_enq_time = os::elapsedTime() - ref_enq_start;
+  g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
+}
+
 void G1CollectedHeap::evacuate_collection_set() {
   set_evacuation_failed(false);
 
@@ -4658,6 +5266,7 @@
 
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par = os::elapsedTime();
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     // The individual threads will set their evac-failure closures.
     StrongRootsScope srs(this);
@@ -4672,15 +5281,23 @@
   g1_policy()->record_par_time(par_time);
   set_par_threads(0);
 
+  // Process any discovered reference objects - we have
+  // to do this _before_ we retire the GC alloc regions
+  // as we may have to copy some 'reachable' referent
+  // objects (and their reachable sub-graphs) that were
+  // not copied during the pause.
+  process_discovered_references();
+
   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
   // non-perm oops then we will need to process the code blobs
   // here too.
   {
-    G1IsAliveClosure is_alive(this);
+    G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
+
   release_gc_alloc_regions();
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
@@ -4702,6 +5319,15 @@
     }
   }
 
+  // Enqueue any references remaining on the STW
+  // reference processor's discovered lists. We need to do
+  // this after the card table is cleaned (and verified) as
+  // the act of enqueuing entries on to the pending list
+  // will log these updates (and dirty their associated
+  // cards). We need these updates logged to update any
+  // RSets.
+  enqueue_discovered_references();
+
   if (G1DeferredRSUpdate) {
     RedirtyLoggedCardTableEntryFastClosure redirty;
     dirty_card_queue_set().set_closure(&redirty);
@@ -4902,7 +5528,7 @@
   }
 
   double elapsed = os::elapsedTime() - start;
-  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
+  g1_policy()->record_clear_ct_time(elapsed * 1000.0);
 #ifndef PRODUCT
   if (G1VerifyCTCleanup || VerifyAfterGC) {
     G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
@@ -5193,7 +5819,6 @@
       g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
-      g1mm()->update_eden_counters();
       return new_alloc_region;
     }
   }
@@ -5208,6 +5833,10 @@
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
   _summary_bytes_used += allocated_bytes;
   _hr_printer.retire(alloc_region);
+  // We update the eden sizes here, when the region is retired,
+  // instead of when it's allocated, since this is the point at which
+  // its used space has been recorded in _summary_bytes_used.
+  g1mm()->update_eden_size();
 }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -155,6 +155,19 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+// The G1 STW is-alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also used extensively during
+// reference processing in STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  void do_object(oop p) { assert(false, "Do not call."); }
+  bool do_object_b(oop p);
+};
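
The closure above is a plain boolean predicate over oops: do_object() is
deliberately unusable and only do_object_b() answers liveness. A standalone
sketch of that shape (ToyOop and its survived flag are illustrative stand-ins;
the real closure queries the heap):

    #include <cassert>
    #include <cstdio>

    // Toy model of the closure above (not HotSpot code): an "oop" carries a
    // flag saying whether it survived; the real closure queries the heap.
    struct ToyOop { bool survived; };

    // Shape of a BoolObjectClosure: do_object() must never be called; only
    // do_object_b() is meaningful and answers "is this object alive?".
    struct ToyIsAliveClosure {
      void do_object(ToyOop*) { assert(false && "Do not call."); }
      bool do_object_b(ToyOop* p) { return p->survived; }
    };

    int main() {
      ToyOop live{true}, dead{false};
      ToyIsAliveClosure is_alive;
      std::printf("live: %d, dead: %d\n",
                  (int)is_alive.do_object_b(&live),
                  (int)is_alive.do_object_b(&dead));  // prints "live: 1, dead: 0"
    }
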
+
 class SurvivorGCAllocRegion : public G1AllocRegion {
 protected:
   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
@@ -174,6 +187,7 @@
 };
 
 class RefineCardTableEntryClosure;
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
   friend class VM_GenCollectForPermanentAllocation;
@@ -573,9 +587,20 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size);
 
+  // Process any reference objects discovered during
+  // an incremental evacuation pause.
+  void process_discovered_references();
+
+  // Enqueue any remaining discovered references
+  // after processing.
+  void enqueue_discovered_references();
+
 public:
 
-  G1MonitoringSupport* g1mm() { return _g1mm; }
+  G1MonitoringSupport* g1mm() {
+    assert(_g1mm != NULL, "should have been initialized");
+    return _g1mm;
+  }
 
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // Returns true if the heap was expanded by the requested amount;
@@ -822,17 +847,87 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
+                                    bool should_mark_root);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
+  // ("Weak") Reference processing support.
+  //
+  // G1 has two instances of the reference processor class. One
+  // (_ref_processor_cm) handles reference object discovery
+  // and subsequent processing during concurrent marking cycles.
+  //
+  // The other (_ref_processor_stw) handles reference object
+  // discovery and processing during full GCs and incremental
+  // evacuation pauses.
+  //
+  // During an incremental pause, reference discovery will be
+  // temporarily disabled for _ref_processor_cm and will be
+  // enabled for _ref_processor_stw. At the end of the evacuation
+  // pause references discovered by _ref_processor_stw will be
+  // processed and discovery will be disabled. The previous
+  // setting for reference object discovery for _ref_processor_cm
+  // will be reinstated.
+  //
+  // At the start of marking:
+  //  * Discovery by the CM ref processor is verified to be inactive
+  //    and its discovered lists are empty.
+  //  * Discovery by the CM ref processor is then enabled.
+  //
+  // At the end of marking:
+  //  * Any references on the CM ref processor's discovered
+  //    lists are processed (possibly MT).
+  //
+  // At the start of full GC we:
+  //  * Disable discovery by the CM ref processor and
+  //    empty CM ref processor's discovered lists
+  //    (without processing any entries).
+  //  * Verify that the STW ref processor is inactive and its
+  //    discovered lists are empty.
+  //  * Temporarily set STW ref processor discovery as single threaded.
+  //  * Temporarily clear the STW ref processor's _is_alive_non_header
+  //    field.
+  //  * Finally enable discovery by the STW ref processor.
+  //
+  // The STW ref processor is used to record any discovered
+  // references during the full GC.
+  //
+  // At the end of a full GC we:
+  //  * Enqueue any reference objects discovered by the STW ref processor
+  //    that have non-live referents. This has the side-effect of
+  //    making the STW ref processor inactive by disabling discovery.
+  //  * Verify that the CM ref processor is still inactive
+  //    and no references have been placed on its discovered
+  //    lists (also checked as a precondition during initial marking).
+
+  // The (stw) reference processor...
+  ReferenceProcessor* _ref_processor_stw;
+
+  // During reference object discovery, the _is_alive_non_header
+  // closure (if non-null) is applied to the referent object to
+  // determine whether the referent is live. If so then the
+  // reference object does not need to be 'discovered' and can
+  // be treated as a regular oop. This has the benefit of reducing
+  // the number of 'discovered' reference objects that need to
+  // be processed.
+  //
+  // Instance of the is_alive closure for embedding into the
+  // STW reference processor as the _is_alive_non_header field.
+  // Supplying a value for the _is_alive_non_header field is
+  // optional but doing so prevents unnecessary additions to
+  // the discovered lists during reference discovery.
+  G1STWIsAliveClosure _is_alive_closure_stw;
+
+  // The (concurrent marking) reference processor...
+  ReferenceProcessor* _ref_processor_cm;
+
   // Instance of the concurrent mark is_alive closure for embedding
-  // into the reference processor as the is_alive_non_header. This
-  // prevents unnecessary additions to the discovered lists during
-  // concurrent discovery.
-  G1CMIsAliveClosure _is_alive_closure;
-
-  // ("Weak") Reference processing support
-  ReferenceProcessor* _ref_processor;
+  // into the Concurrent Marking reference processor as the
+  // _is_alive_non_header field. Supplying a value for the
+  // _is_alive_non_header field is optional but doing so prevents
+  // unnecessary additions to the discovered lists during reference
+  // discovery.
+  G1CMIsAliveClosure _is_alive_closure_cm;
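
To illustrate the filtering described in the comment above: a reference whose
referent the is-alive closure already reports live is skipped rather than
added to a discovered list. A standalone sketch under that assumption (Obj,
Reference, and IsAlive are toy stand-ins, not HotSpot types):

    #include <cstdio>
    #include <vector>

    // Toy stand-ins (not HotSpot types) modelling the filtering described above.
    struct Obj { bool alive; };
    struct Reference { Obj* referent; };
    struct IsAlive { bool operator()(Obj* o) const { return o->alive; } };

    // Discover a reference only when its referent is not already known to be
    // alive; a live referent means the Reference can be treated as a regular
    // oop, which keeps the discovered lists short.
    void try_discover(Reference* ref, const IsAlive& is_alive,
                      std::vector<Reference*>& discovered) {
      if (ref->referent != nullptr && !is_alive(ref->referent)) {
        discovered.push_back(ref);
      }
    }

    int main() {
      Obj live = {true}, dead = {false};
      Reference r1 = {&live}, r2 = {&dead};
      std::vector<Reference*> discovered;
      IsAlive is_alive;
      try_discover(&r1, is_alive, discovered);
      try_discover(&r2, is_alive, discovered);
      std::printf("discovered %zu of 2 references\n", discovered.size()); // 1
    }
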
 
   enum G1H_process_strong_roots_tasks {
     G1H_PS_mark_stack_oops_do,
@@ -873,6 +968,7 @@
   // specified by the policy object.
   jint initialize();
 
+  // Initialize weak reference processing.
   virtual void ref_processing_init();
 
   void set_par_threads(int t) {
@@ -924,8 +1020,13 @@
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
 
-  // Reference Processing accessor
-  ReferenceProcessor* ref_processor() { return _ref_processor; }
+  // Reference Processing accessors
+
+  // The STW reference processor....
+  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
+
+  // The Concurrent Marking reference processor...
+  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
   virtual size_t capacity() const;
   virtual size_t used() const;
@@ -1236,12 +1337,7 @@
   // storage in the heap comes from a young region or not.
   // See ReduceInitialCardMarks.
   virtual bool can_elide_tlab_store_barriers() const {
-    // 6920090: Temporarily disabled, because of lingering
-    // instabilities related to RICM with G1. In the
-    // interim, the option ReduceInitialCardMarksForG1
-    // below is left solely as a debugging device at least
-    // until 6920109 fixes the instabilities.
-    return ReduceInitialCardMarksForG1;
+    return true;
   }
 
   virtual bool card_mark_must_follow_store() const {
@@ -1265,8 +1361,6 @@
   // update logging post-barrier, we don't maintain remembered set
   // information for young gen objects.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    // Re 6920090, 6920109 above.
-    assert(ReduceInitialCardMarksForG1, "Else cannot be here");
     return is_in_young(new_obj);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -152,8 +152,12 @@
 
   _summary(new Summary()),
 
+  _cur_clear_ct_time_ms(0.0),
+
+  _cur_ref_proc_time_ms(0.0),
+  _cur_ref_enq_time_ms(0.0),
+
 #ifndef PRODUCT
-  _cur_clear_ct_time_ms(0.0),
   _min_clear_cc_time_ms(-1.0),
   _max_clear_cc_time_ms(-1.0),
   _cur_clear_cc_time_ms(0.0),
@@ -294,10 +298,10 @@
   }
 
   // Verify PLAB sizes
-  const uint region_size = HeapRegion::GrainWords;
+  const size_t region_size = HeapRegion::GrainWords;
   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
     char buffer[128];
-    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
+    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                  OldPLABSize > region_size ? "Old" : "Young", region_size);
     vm_exit_during_initialization(buffer);
   }
@@ -459,15 +463,16 @@
 // ParallelScavengeHeap::initialize()). We might change this in the
 // future, but it's a good start.
 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+private:
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
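
The helper above is plain integer arithmetic: truncating division clamped to
at least one region. A worked standalone example, assuming a 1 MB region size:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Worked example of size_to_region_num(), assuming a 1 MB region size:
    // byte sizes convert to region counts by truncating division, but never
    // to fewer than one region.
    int main() {
      const size_t GrainBytes = 1024 * 1024;  // assumed HeapRegion::GrainBytes
      auto size_to_region_num = [&](size_t byte_size) {
        return std::max((size_t)1, byte_size / GrainBytes);
      };
      std::printf("%zu\n", size_to_region_num(1));                   // 1
      std::printf("%zu\n", size_to_region_num(GrainBytes));          // 1
      std::printf("%zu\n", size_to_region_num(10 * GrainBytes + 1)); // 10
    }
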
 
 public:
   G1YoungGenSizer() {
     initialize_flags();
     initialize_size_info();
   }
-  size_t size_to_region_num(size_t byte_size) {
-    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
-  }
   size_t min_young_region_num() {
     return size_to_region_num(_min_gen0_size);
   }
@@ -501,11 +506,10 @@
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
-      gclog_or_tty->print_cr("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
+      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
     } else {
       // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
-      size_t heap_regions = sizer.size_to_region_num(_g1->n_regions());
-      update_young_list_size_using_newratio(heap_regions);
+      update_young_list_size_using_newratio(_g1->n_regions());
       _using_new_ratio_calculations = true;
     }
   }
@@ -1479,6 +1483,8 @@
 #endif
     print_stats(1, "Other", other_time_ms);
     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
+    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
+    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
 
     for (int i = 0; i < _aux_num; ++i) {
       if (_cur_aux_times_set[i]) {
@@ -1519,11 +1525,17 @@
   }
 
   if (_last_full_young_gc) {
-    ergo_verbose2(ErgoPartiallyYoungGCs,
-                  "start partially-young GCs",
-                  ergo_format_byte_perc("known garbage"),
-                  _known_garbage_bytes, _known_garbage_ratio * 100.0);
-    set_full_young_gcs(false);
+    if (!last_pause_included_initial_mark) {
+      ergo_verbose2(ErgoPartiallyYoungGCs,
+                    "start partially-young GCs",
+                    ergo_format_byte_perc("known garbage"),
+                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+      set_full_young_gcs(false);
+    } else {
+      ergo_verbose0(ErgoPartiallyYoungGCs,
+                    "do not start partially-young GCs",
+                    ergo_format_reason("concurrent cycle is about to start"));
+    }
     _last_full_young_gc = false;
   }
 
@@ -2485,6 +2497,13 @@
       // initiate a new cycle.
 
       set_during_initial_mark_pause();
+      // We do not allow non-full young GCs during marking.
+      if (!full_young_gcs()) {
+        set_full_young_gcs(true);
+        ergo_verbose0(ErgoPartiallyYoungGCs,
+                      "end partially-young GCs",
+                      ergo_format_reason("concurrent cycle is about to start"));
+      }
 
       // And we can now clear initiate_conc_mark_if_possible() as
       // we've already acted on it.
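
The two hunks above form a small state machine over the full-young flag:
partially-young GCs are not started when a concurrent cycle is about to begin,
and any in-progress partially-young mode is ended when one starts. A
standalone model of that logic (ToyPolicy is illustrative, not the
G1CollectorPolicy code):

    #include <cstdio>

    // Toy model of the full-young / partially-young transitions above
    // (a sketch of the policy logic, not the HotSpot implementation).
    struct ToyPolicy {
      bool full_young_gcs = true;
      bool last_full_young_gc = false;

      // End of a pause: only switch to partially-young GCs if this pause
      // did not also start a concurrent cycle (no initial-mark work in it).
      void record_pause_end(bool pause_included_initial_mark) {
        if (last_full_young_gc) {
          if (!pause_included_initial_mark) {
            full_young_gcs = false;  // start partially-young GCs
          }                          // else: concurrent cycle about to start
          last_full_young_gc = false;
        }
      }

      // About to start an initial-mark pause: non-full young GCs are not
      // allowed during marking, so force full-young mode back on.
      void set_during_initial_mark_pause() {
        if (!full_young_gcs) {
          full_young_gcs = true;     // end partially-young GCs
        }
      }
    };

    int main() {
      ToyPolicy p;
      p.last_full_young_gc = true;
      p.record_pause_end(false);
      std::printf("full_young_gcs = %d\n", p.full_young_gcs); // 0
      p.set_during_initial_mark_pause();
      std::printf("full_young_gcs = %d\n", p.full_young_gcs); // 1
    }
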
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -119,6 +119,8 @@
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
   bool   _satb_drain_time_set;
+  double _cur_ref_proc_time_ms;
+  double _cur_ref_enq_time_ms;
 
 #ifndef PRODUCT
   // Card Table Count Cache stats
@@ -986,6 +988,14 @@
     _cur_aux_times_ms[i] += ms;
   }
 
+  void record_ref_proc_time(double ms) {
+    _cur_ref_proc_time_ms = ms;
+  }
+
+  void record_ref_enq_time(double ms) {
+    _cur_ref_enq_time_ms = ms;
+  }
+
 #ifndef PRODUCT
   void record_cc_clear_time(double ms) {
     if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
@@ -1139,6 +1149,10 @@
     return young_list_length < young_list_max_length;
   }
 
+  size_t young_list_max_length() {
+    return _young_list_max_length;
+  }
+
   void update_region_num(bool young);
 
   bool full_young_gcs() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -62,6 +62,8 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
+
   GenMarkSweep::_ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
@@ -139,6 +141,8 @@
 
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
+
   rp->setup_policy(clear_all_softrefs);
   rp->process_discovered_references(&GenMarkSweep::is_alive,
                                     &GenMarkSweep::keep_alive,
@@ -166,7 +170,6 @@
   GenMarkSweep::follow_mdo_weak_refs();
   assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
 
-
   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(&GenMarkSweep::is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -346,7 +349,8 @@
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_pointer_closure);
 
-  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -27,19 +27,69 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 
-G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
-                                         VirtualSpace* g1_storage_addr) :
+G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
+                                           const char* name,
+                                           int ordinal, int spaces,
+                                           size_t min_capacity,
+                                           size_t max_capacity,
+                                           size_t curr_capacity)
+  : GenerationCounters(name, ordinal, spaces, min_capacity,
+                       max_capacity, curr_capacity), _g1mm(g1mm) { }
+
+// We pad the capacity three times given that the young generation
+// contains three spaces (eden and two survivors).
+G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
+                                                     const char* name)
+  : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
+               G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
+  update_all();
+}
+
+G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
+                                                 const char* name)
+  : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
+               G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
+               G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
+               G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
+  update_all();
+}
+
+void G1YoungGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
+  _current_size->set_value(committed);
+}
+
+void G1OldGenerationCounters::update_all() {
+  size_t committed =
+            G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
+  _current_size->set_value(committed);
+}
+
+G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   _g1h(g1h),
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
-  _non_young_collection_counters(NULL),
+  _old_collection_counters(NULL),
   _old_space_counters(NULL),
   _young_collection_counters(NULL),
   _eden_counters(NULL),
   _from_counters(NULL),
   _to_counters(NULL),
-  _g1_storage_addr(g1_storage_addr)
-{
+
+  _overall_reserved(0),
+  _overall_committed(0),    _overall_used(0),
+  _young_region_num(0),
+  _young_gen_committed(0),
+  _eden_committed(0),       _eden_used(0),
+  _survivor_committed(0),   _survivor_used(0),
+  _old_committed(0),        _old_used(0) {
+
+  _overall_reserved = g1h->max_capacity();
+  recalculate_sizes();
+
   // Counters for GC collections
   //
   //  name "collector.0".  In a generational collector this would be the
@@ -69,110 +119,147 @@
   // generational GC terms.  The "1, 1" parameters are for
   // the n-th generation (=1) with 1 space.
   // Counters are created from minCapacity, maxCapacity, and capacity
-  _non_young_collection_counters =
-    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
+  _old_collection_counters = new G1OldGenerationCounters(this, "old");
 
   //  name  "generation.1.space.0"
   // Counters are created from maxCapacity, capacity, initCapacity,
   // and used.
-  _old_space_counters = new HSpaceCounters("space", 0,
-    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
+  _old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(old_space_committed()) /* init_capacity */,
+    _old_collection_counters);
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
   //  The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
-  // See  _non_young_collection_counters for additional counters
-  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
+  // See  _old_collection_counters for additional counters
+  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
-  // Replace "max_heap_byte_size() with maximum young gen size for
-  // g1Collectedheap
   //  name "generation.0.space.0"
   // See _old_space_counters for additional counters
-  _eden_counters = new HSpaceCounters("eden", 0,
-    _g1h->max_capacity(), eden_space_committed(),
+  _eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(eden_space_committed()) /* init_capacity */,
     _young_collection_counters);
 
   //  name "generation.0.space.1"
   // See _old_space_counters for additional counters
   // Set the arguments to indicate that this survivor space is not used.
-  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
+  _from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
+    pad_capacity(0) /* max_capacity */,
+    pad_capacity(0) /* init_capacity */,
     _young_collection_counters);
+  // Given that this survivor space is not used, we update it here
+  // once to reflect that its used space is 0 so that we don't have to
+  // worry about updating it again later.
+  _from_counters->update_used(0);
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
-  _to_counters = new HSpaceCounters("s1", 2,
-    _g1h->max_capacity(),
-    survivor_space_committed(),
+  _to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
+    pad_capacity(overall_reserved()) /* max_capacity */,
+    pad_capacity(survivor_space_committed()) /* init_capacity */,
     _young_collection_counters);
 }
 
-size_t G1MonitoringSupport::overall_committed() {
-  return g1h()->capacity();
-}
+void G1MonitoringSupport::recalculate_sizes() {
+  G1CollectedHeap* g1 = g1h();
+
+  // Recalculate all the sizes from scratch. We assume that this is
+  // called at a point where no concurrent updates to the various
+  // values we read here are possible (i.e., at a STW phase at the end
+  // of a GC).
 
-size_t G1MonitoringSupport::overall_used() {
-  return g1h()->used_unlocked();
-}
+  size_t young_list_length = g1->young_list()->length();
+  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+  assert(young_list_length >= survivor_list_length, "invariant");
+  size_t eden_list_length = young_list_length - survivor_list_length;
+  // Max length includes any potential extensions to the young gen
+  // we'll do when the GC locker is active.
+  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+  assert(young_list_max_length >= survivor_list_length, "invariant");
+  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
 
-size_t G1MonitoringSupport::eden_space_committed() {
-  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
-}
+  _overall_used = g1->used_unlocked();
+  _eden_used = eden_list_length * HeapRegion::GrainBytes;
+  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+  _young_region_num = young_list_length;
+  _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
+
+  // First calculate the committed sizes that can be calculated independently.
+  _survivor_committed = _survivor_used;
+  _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
 
-size_t G1MonitoringSupport::eden_space_used() {
-  size_t young_list_length = g1h()->young_list()->length();
-  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
-  size_t survivor_used = survivor_space_used();
-  eden_used = subtract_up_to_zero(eden_used, survivor_used);
-  return eden_used;
-}
+  // Next, start with the overall committed size.
+  _overall_committed = g1->capacity();
+  size_t committed = _overall_committed;
+
+  // Remove the committed size we have calculated so far (for the
+  // survivor and old space).
+  assert(committed >= (_survivor_committed + _old_committed), "sanity");
+  committed -= _survivor_committed + _old_committed;
+
+  // Next, calculate and remove the committed size for the eden.
+  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+  // Somewhat defensive: be robust in case there are inaccuracies in
+  // the calculations
+  _eden_committed = MIN2(_eden_committed, committed);
+  committed -= _eden_committed;
 
-size_t G1MonitoringSupport::survivor_space_committed() {
-  return MAX2(survivor_space_used(),
-              (size_t) HeapRegion::GrainBytes);
-}
+  // Finally, give the rest to the old space...
+  _old_committed += committed;
+  // ..and calculate the young gen committed.
+  _young_gen_committed = _eden_committed + _survivor_committed;
 
-size_t G1MonitoringSupport::survivor_space_used() {
-  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
-  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
-  return survivor_used;
+  assert(_overall_committed ==
+         (_eden_committed + _survivor_committed + _old_committed),
+         "the committed sizes should add up");
+  // Somewhat defensive: cap the eden used size to make sure it
+  // never exceeds the committed size.
+  _eden_used = MIN2(_eden_used, _eden_committed);
+  // _survivor_committed and _old_committed are calculated in terms of
+  // the corresponding _*_used value, so the next two conditions
+  // should hold.
+  assert(_survivor_used <= _survivor_committed, "post-condition");
+  assert(_old_used <= _old_committed, "post-condition");
 }
 
-size_t G1MonitoringSupport::old_space_committed() {
-  size_t committed = overall_committed();
-  size_t eden_committed = eden_space_committed();
-  size_t survivor_committed = survivor_space_committed();
-  committed = subtract_up_to_zero(committed, eden_committed);
-  committed = subtract_up_to_zero(committed, survivor_committed);
-  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
-  return committed;
-}
+void G1MonitoringSupport::recalculate_eden_size() {
+  G1CollectedHeap* g1 = g1h();
+
+  // When a new eden region is allocated, only the eden_used size is
+  // affected (since we have recalculated everything else at the last GC).
 
-// See the comment near the top of g1MonitoringSupport.hpp for
-// an explanation of these calculations for "used" and "capacity".
-size_t G1MonitoringSupport::old_space_used() {
-  size_t used = overall_used();
-  size_t eden_used = eden_space_used();
-  size_t survivor_used = survivor_space_used();
-  used = subtract_up_to_zero(used, eden_used);
-  used = subtract_up_to_zero(used, survivor_used);
-  return used;
-}
-
-void G1MonitoringSupport::update_counters() {
-  if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
-    eden_counters()->update_used(eden_space_used());
-    to_counters()->update_capacity(survivor_space_committed());
-    to_counters()->update_used(survivor_space_used());
-    old_space_counters()->update_capacity(old_space_committed());
-    old_space_counters()->update_used(old_space_used());
-    non_young_collection_counters()->update_all();
+  size_t young_region_num = g1h()->young_list()->length();
+  if (young_region_num > _young_region_num) {
+    size_t diff = young_region_num - _young_region_num;
+    _eden_used += diff * HeapRegion::GrainBytes;
+    // Somewhat defensive: cap the eden used size to make sure it
+    // never exceeds the committed size.
+    _eden_used = MIN2(_eden_used, _eden_committed);
+    _young_region_num = young_region_num;
   }
 }
 
-void G1MonitoringSupport::update_eden_counters() {
+void G1MonitoringSupport::update_sizes() {
+  recalculate_sizes();
   if (UsePerfData) {
-    eden_counters()->update_capacity(eden_space_committed());
+    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
+    eden_counters()->update_used(eden_space_used());
+    // only the to survivor space (s1) is active, so we don't need to
+    // update the counters for the from survivor space (s0)
+    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
+    to_counters()->update_used(survivor_space_used());
+    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
+    old_space_counters()->update_used(old_space_used());
+    old_collection_counters()->update_all();
+    young_collection_counters()->update_all();
+  }
+}
+
+void G1MonitoringSupport::update_eden_size() {
+  recalculate_eden_size();
+  if (UsePerfData) {
     eden_counters()->update_used(eden_space_used());
   }
 }
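
The committed-size distribution in recalculate_sizes() above can be checked in
isolation. A standalone sketch with made-up inputs, assuming 1 MB regions and
an old_used value that is already region-aligned (G1 rounds it up with
align_up_to_region_byte_size()):

    #include <algorithm>
    #include <cassert>
    #include <cstdio>

    // Standalone sketch of the committed-size distribution in
    // recalculate_sizes(), with made-up inputs (1 MB regions assumed).
    int main() {
      const size_t Grain = 1024 * 1024;
      size_t overall_committed = 100 * Grain;  // g1->capacity()
      size_t overall_used      = 60 * Grain;   // g1->used_unlocked()
      size_t young_len = 20, survivor_len = 4, young_max_len = 30;

      size_t eden_len     = young_len - survivor_len;
      size_t eden_max_len = young_max_len - survivor_len;

      size_t eden_used     = eden_len * Grain;
      size_t survivor_used = survivor_len * Grain;
      // subtract_up_to_zero(): never let the subtraction go negative.
      size_t old_used = overall_used > eden_used + survivor_used
                      ? overall_used - (eden_used + survivor_used) : 0;

      // Committed sizes: survivors and old first, eden from what remains,
      // and any leftover goes back to old.
      size_t survivor_committed = survivor_used;
      size_t old_committed      = old_used;  // region-aligned in this example
      size_t committed = overall_committed - (survivor_committed + old_committed);
      size_t eden_committed = std::min(eden_max_len * Grain, committed);
      committed -= eden_committed;
      old_committed += committed;
      size_t young_gen_committed = eden_committed + survivor_committed;

      assert(overall_committed ==
             eden_committed + survivor_committed + old_committed);
      std::printf("eden %zuM survivor %zuM old %zuM young-gen %zuM\n",
                  eden_committed / Grain, survivor_committed / Grain,
                  old_committed / Grain, young_gen_committed / Grain);
    }
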
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -28,101 +28,95 @@
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 
 class G1CollectedHeap;
-class G1SpaceMonitoringSupport;
 
-// Class for monitoring logical spaces in G1.
-// G1 defines a set of regions as a young
-// collection (analogous to a young generation).
-// The young collection is a logical generation
-// with no fixed chunk (see space.hpp) reflecting
-// the address space for the generation.  In addition
-// to the young collection there is its complement
-// the non-young collection that is simply the regions
-// not in the young collection.  The non-young collection
-// is treated here as a logical old generation only
-// because the monitoring tools expect a generational
-// heap.  The monitoring tools expect that a Space
-// (see space.hpp) exists that describe the
-// address space of young collection and non-young
-// collection and such a view is provided here.
+// Class for monitoring logical spaces in G1. It provides data for
+// both G1's jstat counters as well as G1's memory pools.
+//
+// G1 splits the heap into heap regions and each heap region belongs
+// to one of the following categories:
+//
+// * eden      : regions that have been allocated since the last GC
+// * survivors : regions with objects that survived the last few GCs
+// * old       : long-lived non-humongous regions
+// * humongous : humongous regions
+// * free      : free regions
+//
+// The combination of eden and survivor regions form the equivalent of
+// the young generation in the other GCs. The combination of old and
+// humongous regions form the equivalent of the old generation in the
+// other GCs. Free regions do not have a good equivalent in the other
+// GCs given that they can be allocated as any of the other region types.
 //
-// This class provides interfaces to access
-// the value of variables for the young collection
-// that include the "capacity" and "used" of the
-// young collection along with constant values
-// for the minimum and maximum capacities for
-// the logical spaces.  Similarly for the non-young
-// collection.
-//
-// Also provided are counters for G1 concurrent collections
-// and stop-the-world full heap collecitons.
+// The monitoring tools expect the heap to contain a number of
+// generations (young, old, perm) and each generation to contain a
+// number of spaces (young: eden, survivors, old). Given that G1 does
+// not maintain those spaces physically (e.g., the set of
+// non-contiguous eden regions can be considered as a "logical"
+// space), we'll provide the illusion that those generations and
+// spaces exist. In reality, each generation and space refers to a set
+// of heap regions that are potentially non-contiguous.
 //
-// Below is a description of how "used" and "capactiy"
-// (or committed) is calculated for the logical spaces.
+// This class provides interfaces to access the min, current, and max
+// capacity and current occupancy for each of G1's logical spaces and
+// generations we expose to the monitoring tools. Also provided are
+// counters for G1 concurrent collections and stop-the-world full heap
+// collections.
 //
-// 1) The used space calculation for a pool is not necessarily
-// independent of the others. We can easily get from G1 the overall
-// used space in the entire heap, the number of regions in the young
-// generation (includes both eden and survivors), and the number of
-// survivor regions. So, from that we calculate:
+// Below is a description of how the various sizes are calculated.
 //
-//  survivor_used = survivor_num * region_size
-//  eden_used     = young_region_num * region_size - survivor_used
-//  old_gen_used  = overall_used - eden_used - survivor_used
+// * Current Capacity
 //
-// Note that survivor_used and eden_used are upper bounds. To get the
-// actual value we would have to iterate over the regions and add up
-// ->used(). But that'd be expensive. So, we'll accept some lack of
-// accuracy for those two. But, we have to be careful when calculating
-// old_gen_used, in case we subtract from overall_used more then the
-// actual number and our result goes negative.
+//    - heap_capacity = current heap capacity (e.g., current committed size)
+//    - young_gen_capacity = current max young gen target capacity
+//          (i.e., young gen target capacity + max allowed expansion capacity)
+//    - survivor_capacity = current survivor region capacity
+//    - eden_capacity = young_gen_capacity - survivor_capacity
+//    - old_capacity = heap_capacity - young_gen_capacity
+//
+//    What we do in the above is to distribute the free regions among
+//    eden_capacity and old_capacity.
 //
-// 2) Calculating the used space is straightforward, as described
-// above. However, how do we calculate the committed space, given that
-// we allocate space for the eden, survivor, and old gen out of the
-// same pool of regions? One way to do this is to use the used value
-// as also the committed value for the eden and survivor spaces and
-// then calculate the old gen committed space as follows:
+// * Occupancy
 //
-//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//    - young_gen_used = current young region capacity
+//    - survivor_used = survivor_capacity
+//    - eden_used = young_gen_used - survivor_used
+//    - old_used = overall_used - young_gen_used
 //
-// Maybe a better way to do that would be to calculate used for eden
-// and survivor as a sum of ->used() over their regions and then
-// calculate committed as region_num * region_size (i.e., what we use
-// to calculate the used space now). This is something to consider
-// in the future.
+//    Unfortunately, we only keep track of the number of currently
+//    allocated young and survivor regions plus the overall used bytes
+//    in the heap, so the above can be a little inaccurate.
+//
+// * Min Capacity
 //
-// 3) Another decision that is again not straightforward is what is
-// the max size that each memory pool can grow to. One way to do this
-// would be to use the committed size for the max for the eden and
-// survivors and calculate the old gen max as follows (basically, it's
-// a similar pattern to what we use for the committed space, as
-// described above):
+//    We set this to 0 for all spaces. We could consider setting the old
+//    min capacity to the min capacity of the heap (see 7078465).
+//
+// * Max Capacity
 //
-//  old_gen_max = overall_max - eden_max - survivor_max
+//    For jstat, we set the max capacity of all spaces to heap_capacity,
+//    given that we don't always have a reasonable upper bound on how big
+//    each space can grow. For the memory pools, we actually make the max
+//    capacity undefined. We could consider setting the old max capacity
+//    to the max capacity of the heap (see 7078465).
 //
-// Unfortunately, the above makes the max of each pool fluctuate over
-// time and, even though this is allowed according to the spec, it
-// broke several assumptions in the M&M framework (there were cases
-// where used would reach a value greater than max). So, for max we
-// use -1, which means "undefined" according to the spec.
+// If we had more accurate occupancy / capacity information per
+// region set, the above calculations would be greatly simplified and
+// made more accurate.
 //
-// 4) Now, there is a very subtle issue with all the above. The
-// framework will call get_memory_usage() on the three pools
-// asynchronously. As a result, each call might get a different value
-// for, say, survivor_num which will yield inconsistent values for
-// eden_used, survivor_used, and old_gen_used (as survivor_num is used
-// in the calculation of all three). This would normally be
-// ok. However, it's possible that this might cause the sum of
-// eden_used, survivor_used, and old_gen_used to go over the max heap
-// size and this seems to sometimes cause JConsole (and maybe other
-// clients) to get confused. There's not a really an easy / clean
-// solution to this problem, due to the asynchrounous nature of the
-// framework.
+// We update all the above synchronously and we store the results in
+// fields so that we just read said fields when needed. A subtle point
+// is that all the above sizes need to be recalculated when the old
+// gen changes capacity (after a GC or after a humongous allocation)
+// but only the eden occupancy changes when a new eden region is
+// allocated. So, in the latter case we have minimal recalculation to
+// do which is important as we want to keep the eden region allocation
+// path as low-overhead as possible.
 
 class G1MonitoringSupport : public CHeapObj {
+  friend class VMStructs;
+
   G1CollectedHeap* _g1h;
-  VirtualSpace* _g1_storage_addr;
 
   // jstat performance counters
   //  incremental collections both fully and partially young
@@ -133,9 +127,9 @@
   // _from_counters, and _to_counters are associated with
   // this "generational" counter.
   GenerationCounters*  _young_collection_counters;
-  //  non-young collection set counters. The _old_space_counters
+  //  old collection set counters. The _old_space_counters
   // below are associated with this "generational" counter.
-  GenerationCounters*  _non_young_collection_counters;
+  GenerationCounters*  _old_collection_counters;
   // Counters for the capacity and used for
   //   the whole heap
   HSpaceCounters*      _old_space_counters;
@@ -145,6 +139,27 @@
   HSpaceCounters*      _from_counters;
   HSpaceCounters*      _to_counters;
 
+  // We recalculate the various sizes when it is appropriate to do so
+  // (at the end of a GC, when a new eden region is allocated, etc.)
+  // and store them here so that we can easily report them when needed
+  // without having to recalculate them every time.
+
+  size_t _overall_reserved;
+  size_t _overall_committed;
+  size_t _overall_used;
+
+  size_t _young_region_num;
+  size_t _young_gen_committed;
+  size_t _eden_committed;
+  size_t _eden_used;
+  size_t _survivor_committed;
+  size_t _survivor_used;
+
+  size_t _old_committed;
+  size_t _old_used;
+
+  G1CollectedHeap* g1h() { return _g1h; }
+
   // It returns x - y if x > y, 0 otherwise.
   // As described in the comment above, some of the inputs to the
   // calculations we have to do are obtained concurrently and hence
@@ -160,15 +175,35 @@
     }
   }
 
+  // Recalculate all the sizes.
+  void recalculate_sizes();
+  // Recalculate only what's necessary when a new eden region is allocated.
+  void recalculate_eden_size();
+
  public:
-  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
+  G1MonitoringSupport(G1CollectedHeap* g1h);
 
-  G1CollectedHeap* g1h() { return _g1h; }
-  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
+  // Unfortunately, the jstat tool assumes that no space has 0
+  // capacity. In our case, given that each space is logical, it's
+  // possible that no regions will be allocated to it, hence to have 0
+  // capacity (e.g., if there are no survivor regions, the survivor
+  // space has 0 capacity). The way we deal with this is to always pad
+  // each capacity value we report to jstat by a very small amount to
+  // make sure that it's never zero. Given that we sometimes have to
+  // report a capacity of a generation that contains several spaces
+  // (e.g., young gen includes one eden, two survivor spaces), the
+  // mult parameter is provided in order to add the appropriate
+  // padding multiple times so that the capacities add up correctly.
+  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
+    return size_bytes + MinObjAlignmentInBytes * mult;
+  }
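
A worked example of the padding above, assuming MinObjAlignmentInBytes is 8;
the point is that padding each space once and the enclosing generation mult
times keeps the reported capacities additive:

    #include <cstddef>
    #include <cstdio>

    // Worked example of pad_capacity(), assuming MinObjAlignmentInBytes == 8:
    // each space gets one unit of padding, and a generation that spans several
    // spaces gets 'mult' units, so the padded capacities still add up.
    int main() {
      const size_t MinObjAlignmentInBytes = 8;  // assumed value
      auto pad_capacity = [&](size_t size_bytes, size_t mult) {
        return size_bytes + MinObjAlignmentInBytes * mult;
      };
      size_t eden  = pad_capacity(0, 1);   // 8 -- an empty space never reports 0
      size_t s0    = pad_capacity(0, 1);   // 8
      size_t s1    = pad_capacity(0, 1);   // 8
      size_t young = pad_capacity(0, 3);   // 24 == eden + s0 + s1
      std::printf("%zu + %zu + %zu == %zu\n", eden, s0, s1, young);
    }
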
 
-  // Performance Counter accessors
-  void update_counters();
-  void update_eden_counters();
+  // Recalculate all the sizes from scratch and update all the jstat
+  // counters accordingly.
+  void update_sizes();
+  // Recalculate only what's necessary when a new eden region is
+  // allocated and update any jstat counters that need to be updated.
+  void update_eden_size();
 
   CollectorCounters* incremental_collection_counters() {
     return _incremental_collection_counters;
@@ -176,8 +211,11 @@
   CollectorCounters* full_collection_counters() {
     return _full_collection_counters;
   }
-  GenerationCounters* non_young_collection_counters() {
-    return _non_young_collection_counters;
+  GenerationCounters* young_collection_counters() {
+    return _young_collection_counters;
+  }
+  GenerationCounters* old_collection_counters() {
+    return _old_collection_counters;
   }
   HSpaceCounters*      old_space_counters() { return _old_space_counters; }
   HSpaceCounters*      eden_counters() { return _eden_counters; }
@@ -187,17 +225,45 @@
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
-  size_t overall_committed();
-  size_t overall_used();
+
+  size_t overall_reserved()           { return _overall_reserved;     }
+  size_t overall_committed()          { return _overall_committed;    }
+  size_t overall_used()               { return _overall_used;         }
 
-  size_t eden_space_committed();
-  size_t eden_space_used();
+  size_t young_gen_committed()        { return _young_gen_committed;  }
+  size_t young_gen_max()              { return overall_reserved();    }
+  size_t eden_space_committed()       { return _eden_committed;       }
+  size_t eden_space_used()            { return _eden_used;            }
+  size_t survivor_space_committed()   { return _survivor_committed;   }
+  size_t survivor_space_used()        { return _survivor_used;        }
+
+  size_t old_gen_committed()          { return old_space_committed(); }
+  size_t old_gen_max()                { return overall_reserved();    }
+  size_t old_space_committed()        { return _old_committed;        }
+  size_t old_space_used()             { return _old_used;             }
+};
 
-  size_t survivor_space_committed();
-  size_t survivor_space_used();
+class G1GenerationCounters: public GenerationCounters {
+protected:
+  G1MonitoringSupport* _g1mm;
+
+public:
+  G1GenerationCounters(G1MonitoringSupport* g1mm,
+                       const char* name, int ordinal, int spaces,
+                       size_t min_capacity, size_t max_capacity,
+                       size_t curr_capacity);
+};
 
-  size_t old_space_committed();
-  size_t old_space_used();
+class G1YoungGenerationCounters: public G1GenerationCounters {
+public:
+  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
+};
+
+class G1OldGenerationCounters: public G1GenerationCounters {
+public:
+  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
+  virtual void update_all();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -34,6 +34,7 @@
 class CMMarkStack;
 class G1ParScanThreadState;
 class CMTask;
+class ReferenceProcessor;
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
@@ -59,8 +60,10 @@
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
 public:
-  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
+                         G1ParScanThreadState* par_scan_state):
     G1ParClosureSuper(g1, par_scan_state) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -68,8 +71,13 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state)
+  {
+    assert(_ref_processor == NULL, "sanity");
+    _ref_processor = rp;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
@@ -92,9 +100,18 @@
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
+
 public:
-  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
+  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() {
+    return &_scanner;
+  }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -105,7 +122,8 @@
   G1ParScanClosure *_scanner;
 protected:
   template <class T> void mark_object(T* p);
-  oop copy_to_survivor_space(oop obj, bool should_mark_copy);
+  oop copy_to_survivor_space(oop obj, bool should_mark_root,
+                                      bool should_mark_copy);
 public:
   G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   G1ParScanClosure *scanner) :
@@ -116,10 +134,20 @@
          bool do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
+
   template <class T> void do_oop_work(T* p);
+
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
+                   ReferenceProcessor* rp) :
+    _scanner(g1, par_scan_state, rp),
+    G1ParCopyHelper(g1, par_scan_state, &_scanner)
+  {
+    assert(_ref_processor == NULL, "sanity");
+  }
+
+  G1ParScanClosure* scanner() { return &_scanner; }
+
   template <class T> void do_oop_nv(T* p) {
     do_oop_work(p);
   }
@@ -129,21 +157,25 @@
 
 typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+
 typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+
+// The following closure types are no longer used but are retained
+// for historical reasons:
+// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
+// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
 
-// This is the only case when we set skip_cset_test. Basically, this
-// closure is (should?) only be called directly while we're draining
-// the overflow and task queues. In that case we know that the
-// reference in question points into the collection set, otherwise we
-// would not have pushed it on the queue. The following is defined in
-// g1_specialized_oop_closures.hpp.
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
-// We need a separate closure to handle references during evacuation
-// failure processing, as we cannot asume that the reference already
-// points into the collection set (like G1ParScanHeapEvacClosure does).
+// The following closure type is defined in g1_specialized_oop_closures.hpp:
+//
+// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+
+// We use a separate closure to handle references during evacuation
+// failure processing.
+// We could have used another instance of G1ParScanHeapEvacClosure
+// (since that closure no longer assumes that the references it
+// handles point into the collection set).
+
 typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
@@ -152,9 +184,10 @@
   DirtyCardToOopClosure* _dcto_cl;
 public:
   FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
-                        G1CollectedHeap* g1, OopClosure* oc) :
-    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
-  {}
+                        G1CollectedHeap* g1,
+                        OopClosure* oc) :
+    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
+
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)        { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -234,6 +234,7 @@
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
   ScanRSClosure scanRScl(oc, worker_i);
+
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
@@ -283,6 +284,7 @@
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
+
   _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
 
   // Now there should be no dirty cards.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -290,10 +290,6 @@
           "each evacuation pause in order to artificially fill up the "     \
           "heap and stress the marking implementation.")                    \
                                                                             \
-  develop(bool, ReduceInitialCardMarksForG1, false,                         \
-          "When ReduceInitialCardMarks is true, this flag setting "         \
-          " controls whether G1 allows the RICM optimization")              \
-                                                                            \
   develop(bool, G1ExitOnExpansionFailure, false,                            \
           "Raise a fatal VM exit out of memory failure in the event "       \
           " that heap expansion fails due to running out of swap.")         \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -33,11 +33,11 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 
-int HeapRegion::LogOfHRGrainBytes = 0;
-int HeapRegion::LogOfHRGrainWords = 0;
-int HeapRegion::GrainBytes        = 0;
-int HeapRegion::GrainWords        = 0;
-int HeapRegion::CardsPerRegion    = 0;
+int    HeapRegion::LogOfHRGrainBytes = 0;
+int    HeapRegion::LogOfHRGrainWords = 0;
+size_t HeapRegion::GrainBytes        = 0;
+size_t HeapRegion::GrainWords        = 0;
+size_t HeapRegion::CardsPerRegion    = 0;
 
 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                  HeapRegion* hr, OopClosure* cl,
@@ -45,7 +45,7 @@
                                  FilterKind fk) :
   ContiguousSpaceDCTOC(hr, cl, precision, NULL),
   _hr(hr), _fk(fk), _g1(g1)
-{}
+{ }
 
 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
@@ -210,15 +210,17 @@
                                               HeapWord* top,
                                               OopClosure* cl) {
   G1CollectedHeap* g1h = _g1;
+  int oop_size;
+  OopClosure* cl2 = NULL;
 
-  int oop_size;
-
-  OopClosure* cl2 = cl;
   FilterIntoCSClosure intoCSFilt(this, g1h, cl);
   FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
+
   switch (_fk) {
+  case NoFilterKind:          cl2 = cl; break;
   case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
   case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
+  default:                    ShouldNotReachHere();
   }
 
   // Start filtering what we add to the remembered set. If the object is
@@ -239,16 +241,19 @@
     case NoFilterKind:
       bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
       break;
+
     case IntoCSFilterKind: {
       FilterIntoCSClosure filt(this, g1h, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     case OutOfRegionFilterKind: {
       FilterOutOfRegionClosure filt(_hr, cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
+
     default:
       ShouldNotReachHere();
     }
@@ -317,11 +322,11 @@
   guarantee(GrainBytes == 0, "we should only set it once");
   // The cast to int is safe, given that we've bounded region_size by
   // MIN_REGION_SIZE and MAX_REGION_SIZE.
-  GrainBytes = (int) region_size;
+  GrainBytes = (size_t)region_size;
 
   guarantee(GrainWords == 0, "we should only set it once");
   GrainWords = GrainBytes >> LogHeapWordSize;
-  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
+  guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
 
   guarantee(CardsPerRegion == 0, "we should only set it once");
   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
@@ -374,8 +379,7 @@
 
 void HeapRegion::par_clear() {
   assert(used() == 0, "the region should have been already cleared");
-  assert(capacity() == (size_t) HeapRegion::GrainBytes,
-         "should be back to normal");
+  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->clear();
   CardTableModRefBS* ct_bs =
@@ -431,7 +435,7 @@
     assert(end() == _orig_end, "sanity");
   }
 
-  assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
+  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
   _humongous_type = NotHumongous;
   _humongous_start_region = NULL;
 }
@@ -483,12 +487,13 @@
 HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
            MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
+    _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
+    _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
 #ifdef ASSERT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -118,7 +118,6 @@
                   FilterKind fk);
 };
 
-
 // The complicating factor is that BlockOffsetTable diverged
 // significantly, and we need functionality that is only in the G1 version.
 // So I copied that code, which led to an alternate G1 version of
@@ -223,10 +222,6 @@
     ContinuesHumongous
   };
 
-  // The next filter kind that should be used for a "new_dcto_cl" call with
-  // the "traditional" signature.
-  HeapRegionDCTOC::FilterKind _next_fk;
-
   // Requires that the region "mr" be dense with objects, and begin and end
   // with an object.
   void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
@@ -351,16 +346,17 @@
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
-  static int LogOfHRGrainBytes;
-  static int LogOfHRGrainWords;
-  // The normal type of these should be size_t. However, they used to
-  // be members of an enum before and they are assumed by the
-  // compilers to be ints. To avoid going and fixing all their uses,
-  // I'm declaring them as ints. I'm not anticipating heap region
-  // sizes to reach anywhere near 2g, so using an int here is safe.
-  static int GrainBytes;
-  static int GrainWords;
-  static int CardsPerRegion;
+  static int    LogOfHRGrainBytes;
+  static int    LogOfHRGrainWords;
+
+  static size_t GrainBytes;
+  static size_t GrainWords;
+  static size_t CardsPerRegion;
+
+  static size_t align_up_to_region_byte_size(size_t sz) {
+    return (sz + (size_t) GrainBytes - 1) &
+                                      ~((1 << (size_t) LogOfHRGrainBytes) - 1);
+  }
 
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
@@ -573,40 +569,14 @@
   // allocated in the current region before the last call to "save_mark".
   void oop_before_save_marks_iterate(OopClosure* cl);
 
-  // This call determines the "filter kind" argument that will be used for
-  // the next call to "new_dcto_cl" on this region with the "traditional"
-  // signature (i.e., the call below.)  The default, in the absence of a
-  // preceding call to this method, is "NoFilterKind", and a call to this
-  // method is necessary for each such call, or else it reverts to the
-  // default.
-  // (This is really ugly, but all other methods I could think of changed a
-  // lot of main-line code for G1.)
-  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
-    _next_fk = nfk;
-  }
-
   DirtyCardToOopClosure*
   new_dcto_closure(OopClosure* cl,
                    CardTableModRefBS::PrecisionStyle precision,
                    HeapRegionDCTOC::FilterKind fk);
 
-#if WHASSUP
-  DirtyCardToOopClosure*
-  new_dcto_closure(OopClosure* cl,
-                   CardTableModRefBS::PrecisionStyle precision,
-                   HeapWord* boundary) {
-    assert(boundary == NULL, "This arg doesn't make sense here.");
-    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
-    _next_fk = HeapRegionDCTOC::NoFilterKind;
-    return res;
-  }
-#endif
-
-  //
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
-  //
 
   // Note the start of a marking phase. Record the
   // start of the unmarked area of the region here.
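[Editor's sketch] The new align_up_to_region_byte_size() above uses the standard power-of-two round-up trick; its mask is built from LogOfHRGrainBytes, which is equivalent to masking with GrainBytes - 1 because GrainBytes == (size_t)1 << LogOfHRGrainBytes. A minimal standalone sketch of the same idiom, with a hypothetical grain_bytes parameter standing in for the static fields:

#include <cassert>
#include <cstddef>

// Round sz up to the next multiple of grain_bytes, which must be a
// power of two (illustrative helper, not code from this patch).
static size_t align_up(size_t sz, size_t grain_bytes) {
  assert((grain_bytes & (grain_bytes - 1)) == 0 && "power of two");
  // Adding grain_bytes - 1 carries into the next multiple unless sz is
  // already aligned; masking the low bits then truncates back down.
  return (sz + grain_bytes - 1) & ~(grain_bytes - 1);
}

// e.g. align_up(1000, 1024) == 1024 and align_up(2048, 1024) == 2048.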
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -148,7 +148,7 @@
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 
-      assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
+      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
              "Must be in range.");
       add_card_work(from_card, par);
     }
@@ -639,7 +639,7 @@
         uintptr_t(from_hr->bottom())
           >> CardTableModRefBS::card_shift;
       CardIdx_t card_index = from_card - from_hr_bot_card_index;
-      assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
+      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
              "Must be in range.");
       if (G1HRRSUseSparseTable &&
           _sparse_table.add_card(from_hrs_ind, card_index)) {
@@ -1066,7 +1066,7 @@
       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
     CardIdx_t card_index = from_card - hr_bot_card_index;
-    assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
+    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
            "Must be in range.");
     return _sparse_table.contains_card(hr_ind, card_index);
   }
@@ -1191,7 +1191,7 @@
   _is = Sparse;
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;
+  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
 
   _cur_region_cur_card = 0;
 
@@ -1270,7 +1270,7 @@
 bool HeapRegionRemSetIterator::fine_has_next() {
   return
     _fine_cur_prt != NULL &&
-    _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
+    _cur_region_cur_card < HeapRegion::CardsPerRegion;
 }
 
 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
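[Editor's sketch] The (size_t) casts added to the asserts in this file matter because CardsPerRegion is now size_t: in a mixed signed/unsigned comparison the signed CardIdx_t is converted to unsigned, so a negative index would silently compare as a huge value. A small illustration with made-up values:

#include <cstddef>
#include <cstdio>

int main() {
  int    card_index       = -1;   // out-of-range index
  size_t cards_per_region = 512;  // stand-in for HeapRegion::CardsPerRegion
  // The implicit conversion turns -1 into SIZE_MAX, so the upper-bound
  // test alone rejects it for the wrong reason, and compilers warn
  // (-Wsign-compare) without an explicit cast. Prints 0.
  printf("%d\n", (size_t)card_index < cards_per_region);
  // Hence the patched asserts keep the signed lower-bound check
  // (0 <= card_index) and cast only for the upper bound.
  return 0;
}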
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -395,8 +395,8 @@
   // Coarse table iteration fields:
 
   // Current region index.
-  int _coarse_cur_region_index;
-  int _coarse_cur_region_cur_card;
+  int    _coarse_cur_region_index;
+  size_t _coarse_cur_region_cur_card;
 
   bool coarse_has_next(size_t& card_index);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -56,6 +56,7 @@
 // and maintain that: _length <= _allocated_length <= _max_length
 
 class HeapRegionSeq: public CHeapObj {
+  friend class VMStructs;
 
   // The array that holds the HeapRegions.
   HeapRegion** _regions;
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -29,6 +29,7 @@
 #include "memory/sharedHeap.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
 
 // This method removes entries from an SATB buffer that will not be
 // useful to the concurrent marking threads. An entry is removed if it
@@ -252,9 +253,18 @@
       t->satb_mark_queue().apply_closure(_par_closures[worker]);
     }
   }
-  // We'll have worker 0 do this one.
-  if (worker == 0) {
-    shared_satb_queue()->apply_closure(_par_closures[0]);
+
+  // We also need to claim the VMThread so that its parity is updated;
+  // otherwise the next call to Thread::possibly_parallel_oops_do inside
+  // a StrongRootsScope might skip the VMThread because it has a stale
+  // parity that matches the parity set by the StrongRootsScope.
+  //
+  // Whichever worker succeeds in claiming the VMThread gets to do
+  // the shared queue.
+
+  VMThread* vmt = VMThread::vm_thread();
+  if (vmt->claim_oops_do(true, parity)) {
+    shared_satb_queue()->apply_closure(_par_closures[worker]);
   }
 }
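[Editor's sketch] The fix relies on Thread::claim_oops_do's parity protocol: each thread stores the parity of the last root scan that claimed it, and a compare-and-swap lets exactly one claimer win per scan. A simplified model of that protocol, with std::atomic standing in for HotSpot's Atomic::cmpxchg and an illustrative ClaimableThread type (the real method also takes an is_par flag):

#include <atomic>

struct ClaimableThread {
  std::atomic<int> _oops_do_parity{0};

  // Returns true for exactly one caller per change of parity; a thread
  // whose stored parity already matches the scan's parity is treated as
  // claimed and skipped, which is the stale-parity hazard the comment
  // above describes.
  bool claim_oops_do(int strong_roots_parity) {
    int observed = _oops_do_parity.load();
    if (observed == strong_roots_parity) {
      return false;  // already claimed for this scan
    }
    // Only one of the racing workers installs the new parity.
    return _oops_do_parity.compare_exchange_strong(observed,
                                                   strong_roots_parity);
  }
};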
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+
+#define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
+                                                                              \
+  static_field(HeapRegion, GrainBytes, size_t)                                \
+                                                                              \
+  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
+  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
+                                                                              \
+  nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
+  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
+  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
+  nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
+                                                                              \
+  nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
+  nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t)           \
+  nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
+  nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
+
+
+#define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
+                                                                              \
+  declare_type(G1CollectedHeap, SharedHeap)                                   \
+                                                                              \
+  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_toplevel_type(G1MonitoringSupport)                                  \
+                                                                              \
+  declare_toplevel_type(G1CollectedHeap*)                                     \
+  declare_toplevel_type(HeapRegion*)                                          \
+  declare_toplevel_type(G1MonitoringSupport*)                                 \
+
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
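[Editor's sketch] VM_STRUCTS_G1 and VM_TYPES_G1 follow vmStructs' X-macro convention: the table is a macro over callbacks, and vmStructs.cpp instantiates it once per purpose (generate entries, type-check them, and so on; see the VM_STRUCTS_G1(GENERATE_..., ...) calls further down in this changeset). A toy model of that expansion, with hypothetical DEMO_* callbacks in place of the real GENERATE_*_VM_STRUCT_ENTRY macros:

#include <cstdio>

#define DEMO_NONSTATIC_ENTRY(type, field, ftype) \
  printf("nonstatic %s::%s : %s\n", #type, #field, #ftype);
#define DEMO_STATIC_ENTRY(type, field, ftype) \
  printf("static    %s::%s : %s\n", #type, #field, #ftype);

// Two rows from the real table, reduced for illustration.
#define DEMO_STRUCTS_G1(nonstatic_field, static_field) \
  static_field(HeapRegion, GrainBytes, size_t)         \
  nonstatic_field(HeapRegionSeq, _length, size_t)

int main() {
  // Expands into one printf statement per table row.
  DEMO_STRUCTS_G1(DEMO_NONSTATIC_ENTRY, DEMO_STATIC_ENTRY)
  return 0;
}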
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -198,10 +198,9 @@
 
     allocate_stacks();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     ref_processor()->setup_policy(clear_all_softrefs);
 
     mark_sweep_phase1(clear_all_softrefs);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -2069,10 +2069,9 @@
     CodeCache::gc_prologue();
     Threads::gc_prologue();
 
-    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery();
+    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     ref_processor()->setup_policy(maximum_heap_compaction);
 
     bool marked_for_unloading = false;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -102,17 +102,15 @@
   _state = flushed;
 }
 
-bool PSPromotionLAB::unallocate_object(oop obj) {
+bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
   assert(Universe::heap()->is_in(obj), "Object outside heap");
 
   if (contains(obj)) {
-    HeapWord* object_end = (HeapWord*)obj + obj->size();
-    assert(object_end <= top(), "Object crosses promotion LAB boundary");
+    HeapWord* object_end = obj + obj_size;
+    assert(object_end == top(), "Not matching last allocation");
 
-    if (object_end == top()) {
-      set_top((HeapWord*)obj);
-      return true;
-    }
+    set_top(obj);
+    return true;
   }
 
   return false;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -73,7 +73,7 @@
 
   bool is_flushed()                  { return _state == flushed; }
 
-  bool unallocate_object(oop obj);
+  bool unallocate_object(HeapWord* obj, size_t obj_size);
 
   // Returns a subregion containing all objects in this space.
   MemRegion used_region()            { return MemRegion(bottom(), top()); }
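[Editor's sketch] The reworked unallocate_object() no longer reads the size from the copied object; the caller passes the raw address and size explicitly, and the assert tightens the contract so that only the most recent LAB allocation may be retracted. A simplified sketch of that bump-pointer undo, using char* in place of HeapWord* and a hypothetical ToyLAB type:

#include <cstddef>

// Simplified bump-pointer LAB (illustrative, not the VM's class).
struct ToyLAB {
  char* _bottom;
  char* _top;
  char* _end;

  bool contains(const char* p) const { return p >= _bottom && p < _end; }

  // Mirrors the patched PSPromotionLAB::unallocate_object contract: if
  // obj is in this LAB it must be the most recent allocation, i.e.
  // obj + obj_size == _top.
  bool unallocate(char* obj, size_t obj_size) {
    if (!contains(obj)) {
      return false;  // caller overwrites the copy with a filler object
    }
    // assert(obj + obj_size == _top, "not matching last allocation")
    _top = obj;      // bump the pointer back down
    return true;
  }
};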
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -380,10 +380,10 @@
       // deallocate it, so we have to test.  If the deallocation fails,
       // overwrite with a filler object.
       if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object(new_obj)) {
+        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
           CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
         }
-      } else if (!_young_lab.unallocate_object(new_obj)) {
+      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
       }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -350,10 +350,9 @@
     }
     save_to_space_top_before_gc();
 
-    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    reference_processor()->enable_discovery();
+    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     reference_processor()->setup_policy(false);
 
     // We track how much was promoted to the next generation for
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -26,14 +26,10 @@
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/resourceArea.hpp"
 
-
-GenerationCounters::GenerationCounters(const char* name,
-                                       int ordinal, int spaces,
-                                       VirtualSpace* v):
-                    _virtual_space(v) {
-
+void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
+                                    size_t min_capacity, size_t max_capacity,
+                                    size_t curr_capacity) {
   if (UsePerfData) {
-
     EXCEPTION_MARK;
     ResourceMark rm;
 
@@ -51,18 +47,37 @@
 
     cname = PerfDataManager::counter_name(_name_space, "minCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+                                     min_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
     PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->reserved_size(), CHECK);
+                                     max_capacity, CHECK);
 
     cname = PerfDataManager::counter_name(_name_space, "capacity");
-    _current_size = PerfDataManager::create_variable(SUN_GC, cname,
-                                     PerfData::U_Bytes,
-                                     _virtual_space == NULL ? 0 :
-                                     _virtual_space->committed_size(), CHECK);
+    _current_size =
+      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
+                                       curr_capacity, CHECK);
   }
 }
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       VirtualSpace* v)
+  : _virtual_space(v) {
+  assert(v != NULL, "don't call this constructor if v == NULL");
+  initialize(name, ordinal, spaces,
+             v->committed_size(), v->reserved_size(), v->committed_size());
+}
+
+GenerationCounters::GenerationCounters(const char* name,
+                                       int ordinal, int spaces,
+                                       size_t min_capacity, size_t max_capacity,
+                                       size_t curr_capacity)
+  : _virtual_space(NULL) {
+  initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
+}
+
+void GenerationCounters::update_all() {
+  assert(_virtual_space != NULL, "otherwise, override this method");
+  _current_size->set_value(_virtual_space->committed_size());
+}
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -34,6 +34,11 @@
 class GenerationCounters: public CHeapObj {
   friend class VMStructs;
 
+private:
+  void initialize(const char* name, int ordinal, int spaces,
+                  size_t min_capacity, size_t max_capacity,
+                  size_t curr_capacity);
+
  protected:
   PerfVariable*      _current_size;
   VirtualSpace*      _virtual_space;
@@ -48,11 +53,18 @@
   char*              _name_space;
 
   // This constructor is only meant for use with the PSGenerationCounters
-  // constructor.  The need for such an constructor should be eliminated
+  // constructor. The need for such a constructor should be eliminated
   // when VirtualSpace and PSVirtualSpace are unified.
-  GenerationCounters() : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+  GenerationCounters()
+             : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+
+  // This constructor is used for subclasses that do not have a space
+  // associated with them (e.g., in G1).
+  GenerationCounters(const char* name, int ordinal, int spaces,
+                     size_t min_capacity, size_t max_capacity,
+                     size_t curr_capacity);
+
  public:
-
   GenerationCounters(const char* name, int ordinal, int spaces,
                      VirtualSpace* v);
 
@@ -60,10 +72,7 @@
     if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
   }
 
-  virtual void update_all() {
-    _current_size->set_value(_virtual_space == NULL ? 0 :
-                             _virtual_space->committed_size());
-  }
+  virtual void update_all();
 
   const char* name_space() const        { return _name_space; }
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -599,8 +599,7 @@
           // atomic wrt other collectors in this configuration, we
           // are guaranteed to have empty discovered ref lists.
           if (rp->discovery_is_atomic()) {
-            rp->verify_no_references_recorded();
-            rp->enable_discovery();
+            rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
             rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
--- a/hotspot/src/share/vm/memory/referencePolicy.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/memory/referencePolicy.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,9 @@
 
 // The oop passed in is the SoftReference object, and not
 // the object the SoftReference points to.
-bool LRUCurrentHeapPolicy::should_clear_reference(oop p) {
-  jlong interval = java_lang_ref_SoftReference::clock() - java_lang_ref_SoftReference::timestamp(p);
+bool LRUCurrentHeapPolicy::should_clear_reference(oop p,
+                                                  jlong timestamp_clock) {
+  jlong interval = timestamp_clock - java_lang_ref_SoftReference::timestamp(p);
   assert(interval >= 0, "Sanity check");
 
   // The interval will be zero if the ref was accessed since the last scavenge/gc.
@@ -71,8 +72,9 @@
 
 // The oop passed in is the SoftReference object, and not
 // the object the SoftReference points to.
-bool LRUMaxHeapPolicy::should_clear_reference(oop p) {
-  jlong interval = java_lang_ref_SoftReference::clock() - java_lang_ref_SoftReference::timestamp(p);
+bool LRUMaxHeapPolicy::should_clear_reference(oop p,
+                                             jlong timestamp_clock) {
+  jlong interval = timestamp_clock - java_lang_ref_SoftReference::timestamp(p);
   assert(interval >= 0, "Sanity check");
 
   // The interval will be zero if the ref was accessed since the last scavenge/gc.
--- a/hotspot/src/share/vm/memory/referencePolicy.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/memory/referencePolicy.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,19 +31,27 @@
 
 class ReferencePolicy : public CHeapObj {
  public:
-  virtual bool should_clear_reference(oop p)       { ShouldNotReachHere(); return true; }
+  virtual bool should_clear_reference(oop p, jlong timestamp_clock) {
+    ShouldNotReachHere();
+    return true;
+  }
+
   // Capture state (of-the-VM) information needed to evaluate the policy
   virtual void setup() { /* do nothing */ }
 };
 
 class NeverClearPolicy : public ReferencePolicy {
  public:
-  bool should_clear_reference(oop p) { return false; }
+  virtual bool should_clear_reference(oop p, jlong timestamp_clock) {
+    return false;
+  }
 };
 
 class AlwaysClearPolicy : public ReferencePolicy {
  public:
-  bool should_clear_reference(oop p) { return true; }
+  virtual bool should_clear_reference(oop p, jlong timestamp_clock) {
+    return true;
+  }
 };
 
 class LRUCurrentHeapPolicy : public ReferencePolicy {
@@ -55,7 +63,7 @@
 
   // Capture state (of-the-VM) information needed to evaluate the policy
   void setup();
-  bool should_clear_reference(oop p);
+  virtual bool should_clear_reference(oop p, jlong timestamp_clock);
 };
 
 class LRUMaxHeapPolicy : public ReferencePolicy {
@@ -67,7 +75,7 @@
 
   // Capture state (of-the-VM) information needed to evaluate the policy
   void setup();
-  bool should_clear_reference(oop p);
+  virtual bool should_clear_reference(oop p, jlong timestamp_clock);
 };
 
 #endif // SHARE_VM_MEMORY_REFERENCEPOLICY_HPP
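[Editor's sketch] The widened signature threads a single snapshot of the soft-ref clock through every policy query, instead of each policy re-reading java_lang_ref_SoftReference::clock() per reference (see _soft_ref_timestamp_clock in referenceProcessor.cpp below). A toy version of the LRU decision under that contract, with illustrative types:

#include <cstdint>

// Toy LRU policy: clear a soft reference that has not been touched for
// more than _max_interval ms, measured against the clock snapshot taken
// when discovery was enabled (illustrative, not the VM's class).
struct ToyLRUPolicy {
  int64_t _max_interval;  // computed once per collection in setup()

  bool should_clear_reference(int64_t ref_timestamp,
                              int64_t timestamp_clock) const {
    int64_t interval = timestamp_clock - ref_timestamp;
    // interval == 0 means the ref was accessed since the last GC.
    return interval > _max_interval;
  }
};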
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -35,49 +35,20 @@
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
-const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
 bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
-
-// List of discovered references.
-class DiscoveredList {
-public:
-  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
-  oop head() const     {
-     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
-                                _oop_head;
-  }
-  HeapWord* adr_head() {
-    return UseCompressedOops ? (HeapWord*)&_compressed_head :
-                               (HeapWord*)&_oop_head;
-  }
-  void   set_head(oop o) {
-    if (UseCompressedOops) {
-      // Must compress the head ptr.
-      _compressed_head = oopDesc::encode_heap_oop(o);
-    } else {
-      _oop_head = o;
-    }
-  }
-  bool   empty() const          { return head() == NULL; }
-  size_t length()               { return _len; }
-  void   set_length(size_t len) { _len = len;  }
-  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
-  void   dec_length(size_t dec) { _len -= dec; }
-private:
-  // Set value depending on UseCompressedOops. This could be a template class
-  // but then we have to fix all the instantiations and declarations that use this class.
-  oop       _oop_head;
-  narrowOop _compressed_head;
-  size_t _len;
-};
+jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;
 
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }
 
 void ReferenceProcessor::init_statics() {
-  // Initialize the master soft ref clock.
-  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
+  jlong now = os::javaTimeMillis();
+
+  // Initialize the soft ref timestamp clock.
+  _soft_ref_timestamp_clock = now;
+  // Also update the soft ref clock in j.l.r.SoftReference
+  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);
 
   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
@@ -91,6 +62,28 @@
   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
+void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
+#ifdef ASSERT
+  // Verify that we're not currently discovering refs
+  assert(!verify_disabled || !_discovering_refs, "nested call?");
+
+  if (check_no_refs) {
+    // Verify that the discovered lists are empty
+    verify_no_references_recorded();
+  }
+#endif // ASSERT
+
+  // Someone could have modified the value of the static
+  // field in the j.l.r.SoftReference class that holds the
+  // soft reference timestamp clock using reflection or
+  // Unsafe between GCs. Unconditionally update the static
+  // field in ReferenceProcessor here so that we use the new
+  // value during reference discovery.
+
+  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
+  _discovering_refs = true;
+}
+
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                        bool      mt_processing,
                                        int       mt_processing_degree,
@@ -112,7 +105,8 @@
   _discovery_is_mt     = mt_discovery;
   _num_q               = MAX2(1, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
-  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
+  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
+                                          _max_num_q * number_of_subclasses_of_ref());
   if (_discoveredSoftRefs == NULL) {
     vm_exit_during_initialization("Could not allocate RefProc Array");
   }
@@ -120,7 +114,7 @@
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
   // Initialize all entries to NULL
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     _discoveredSoftRefs[i].set_head(NULL);
     _discoveredSoftRefs[i].set_length(0);
   }
@@ -134,19 +128,15 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    guarantee(_discoveredSoftRefs[i].empty(),
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    guarantee(_discoveredSoftRefs[i].is_empty(),
               "Found non-empty discovered list");
   }
 }
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
     } else {
@@ -159,17 +149,21 @@
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
   jlong now = os::javaTimeMillis();
-  jlong clock = java_lang_ref_SoftReference::clock();
+  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
+  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");
+
   NOT_PRODUCT(
-  if (now < clock) {
-    warning("time warp: %d to %d", clock, now);
+  if (now < _soft_ref_timestamp_clock) {
+    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
+            _soft_ref_timestamp_clock, now);
   }
   )
   // In product mode, protect ourselves from system time being adjusted
   // externally and going backward; see note in the implementation of
   // GenCollectedHeap::time_since_last_gc() for the right way to fix
   // this uniformly throughout the VM; see bug-id 4741166. XXX
-  if (now > clock) {
+  if (now > _soft_ref_timestamp_clock) {
+    _soft_ref_timestamp_clock = now;
     java_lang_ref_SoftReference::set_clock(now);
   }
   // Else leave clock stalled at its old value until time progresses
@@ -187,6 +181,16 @@
   // Stop treating discovered references specially.
   disable_discovery();
 
+  // If discovery was concurrent, someone could have modified
+  // the value of the static field in the j.l.r.SoftReference
+  // class that holds the soft reference timestamp clock using
+  // reflection or Unsafe between when discovery was enabled and
+  // now. Unconditionally update the static field in ReferenceProcessor
+  // here so that we use the new value during processing of the
+  // discovered soft refs.
+
+  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
+
   bool trace_time = PrintGCDetails && PrintReferenceGC;
   // Soft references
   {
@@ -404,7 +408,7 @@
     // allocated and are indexed into.
     assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
     for (int j = 0;
-         j < subclasses_of_ref;
+         j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
       _ref_processor.enqueue_discovered_reflist(
         _refs_lists[index], _pending_list_addr);
@@ -424,7 +428,7 @@
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
       _discoveredSoftRefs[i].set_head(NULL);
       _discoveredSoftRefs[i].set_length(0);
@@ -432,119 +436,7 @@
   }
 }
 
-// Iterator for the list of discovered references.
-class DiscoveredListIterator {
-public:
-  inline DiscoveredListIterator(DiscoveredList&    refs_list,
-                                OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive);
-
-  // End Of List.
-  inline bool has_next() const { return _ref != NULL; }
-
-  // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
-
-  // Get oop to the referent object.
-  inline oop referent() const { return _referent; }
-
-  // Returns true if referent is alive.
-  inline bool is_referent_alive() const;
-
-  // Loads data for the current reference.
-  // The "allow_null_referent" argument tells us to allow for the possibility
-  // of a NULL referent in the discovered Reference object. This typically
-  // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently, or interleaved, with mutator execution.
-  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
-
-  // Move to the next discovered reference.
-  inline void next();
-
-  // Remove the current reference from the list
-  inline void remove();
-
-  // Make the Reference object active again.
-  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
-
-  // Make the referent alive.
-  inline void make_referent_alive() {
-    if (UseCompressedOops) {
-      _keep_alive->do_oop((narrowOop*)_referent_addr);
-    } else {
-      _keep_alive->do_oop((oop*)_referent_addr);
-    }
-  }
-
-  // Update the discovered field.
-  inline void update_discovered() {
-    // First _prev_next ref actually points into DiscoveredList (gross).
-    if (UseCompressedOops) {
-      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
-        _keep_alive->do_oop((narrowOop*)_prev_next);
-      }
-    } else {
-      if (!oopDesc::is_null(*(oop*)_prev_next)) {
-        _keep_alive->do_oop((oop*)_prev_next);
-      }
-    }
-  }
-
-  // NULL out referent pointer.
-  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
-
-  // Statistics
-  NOT_PRODUCT(
-  inline size_t processed() const { return _processed; }
-  inline size_t removed() const   { return _removed; }
-  )
-
-  inline void move_to_next();
-
-private:
-  DiscoveredList&    _refs_list;
-  HeapWord*          _prev_next;
-  oop                _prev;
-  oop                _ref;
-  HeapWord*          _discovered_addr;
-  oop                _next;
-  HeapWord*          _referent_addr;
-  oop                _referent;
-  OopClosure*        _keep_alive;
-  BoolObjectClosure* _is_alive;
-  DEBUG_ONLY(
-  oop                _first_seen; // cyclic linked list check
-  )
-  NOT_PRODUCT(
-  size_t             _processed;
-  size_t             _removed;
-  )
-};
-
-inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
-                                                      OopClosure*        keep_alive,
-                                                      BoolObjectClosure* is_alive)
-  : _refs_list(refs_list),
-    _prev_next(refs_list.adr_head()),
-    _prev(NULL),
-    _ref(refs_list.head()),
-#ifdef ASSERT
-    _first_seen(refs_list.head()),
-#endif
-#ifndef PRODUCT
-    _processed(0),
-    _removed(0),
-#endif
-    _next(NULL),
-    _keep_alive(keep_alive),
-    _is_alive(is_alive)
-{ }
-
-inline bool DiscoveredListIterator::is_referent_alive() const {
-  return _is_alive->do_object_b(_referent);
-}
-
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
+void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
@@ -560,13 +452,7 @@
          "bad referent");
 }
 
-inline void DiscoveredListIterator::next() {
-  _prev_next = _discovered_addr;
-  _prev = _ref;
-  move_to_next();
-}
-
-inline void DiscoveredListIterator::remove() {
+void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
   oop_store_raw(_discovered_addr, NULL);
 
@@ -592,15 +478,29 @@
   _refs_list.dec_length(1);
 }
 
-inline void DiscoveredListIterator::move_to_next() {
-  if (_ref == _next) {
-    // End of the list.
-    _ref = NULL;
+// Make the Reference object active again.
+void DiscoveredListIterator::make_active() {
+  // For G1 we don't want to use set_next - it
+  // will dirty the card for the next field of
+  // the reference object and will fail
+  // CT verification.
+  if (UseG1GC) {
+    BarrierSet* bs = oopDesc::bs();
+    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
+
+    if (UseCompressedOops) {
+      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+    } else {
+      bs->write_ref_field_pre((oop*)next_addr, NULL);
+    }
+    java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
-    _ref = _next;
+    java_lang_ref_Reference::set_next(_ref, NULL);
   }
-  assert(_ref != _first_seen, "cyclic ref_list found");
-  NOT_PRODUCT(_processed++);
+}
+
+void DiscoveredListIterator::clear_referent() {
+  oop_store_raw(_referent_addr, NULL);
 }
 
 // NOTE: process_phase*() are largely similar, and at a high level
@@ -627,7 +527,8 @@
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
     bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
-    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
+    if (referent_is_dead &&
+        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
                                iter.obj(), iter.obj()->blueprint()->internal_name());
@@ -786,10 +687,9 @@
 
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
-                             list_name(i));
+      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   }
@@ -858,6 +758,14 @@
   bool _clear_referent;
 };
 
+void ReferenceProcessor::set_discovered(oop ref, oop value) {
+  if (_discovered_list_needs_barrier) {
+    java_lang_ref_Reference::set_discovered(ref, value);
+  } else {
+    java_lang_ref_Reference::set_discovered_raw(ref, value);
+  }
+}
+
 // Balances reference queues.
 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 // queues[0, 1, ..., _num_q-1] because only the first _num_q
@@ -915,9 +823,9 @@
         // Add the chain to the to list.
         if (ref_lists[to_idx].head() == NULL) {
           // to list is empty. Make a loop at the end.
-          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
+          set_discovered(move_tail, move_tail);
         } else {
-          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+          set_discovered(move_tail, ref_lists[to_idx].head());
         }
         ref_lists[to_idx].set_head(move_head);
         ref_lists[to_idx].inc_length(refs_to_move);
@@ -1038,11 +946,7 @@
 
 void ReferenceProcessor::clean_up_discovered_references() {
   // loop over the lists
-  // Should this instead be
-  // for (int i = 0; i < subclasses_of_ref; i++_ {
-  //   for (int j = 0; j < _num_q; j++) {
-  //     int index = i * _max_num_q + j;
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
@@ -1255,11 +1159,13 @@
     // time-stamp policies advance the soft-ref clock only
     // at a major collection cycle, this is always currently
     // accurate.
-    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
+    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
       return false;
     }
   }
 
+  ResourceMark rm;      // Needed for tracing.
+
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
   assert(discovered->is_oop_or_null(), "bad discovered field");
@@ -1472,7 +1378,9 @@
 }
 
 const char* ReferenceProcessor::list_name(int i) {
-   assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
+   assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
+          "Out of bounds index");
+
    int j = i / _max_num_q;
    switch (j) {
      case 0: return "SoftRef";
@@ -1493,7 +1401,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
+  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     clear_discovered_references(_discoveredSoftRefs[i]);
   }
 }
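[Editor's sketch] Putting the pieces together, the per-collection lifecycle that the two-flag enable_discovery() supports looks roughly like this for a stop-the-world collector; toy_full_collection and its shape are illustrative, mirroring the call sites in psMarkSweep.cpp and psScavenge.cpp above:

// Sketch only, assuming a valid ReferenceProcessor*.
void toy_full_collection(ReferenceProcessor* rp, bool clear_all_softrefs) {
  // Entering the pause: assert discovery was off, verify the discovered
  // lists are empty, and snapshot the j.l.r.SoftReference clock into
  // _soft_ref_timestamp_clock.
  rp->enable_discovery(true /* verify_disabled */, true /* check_no_refs */);
  rp->setup_policy(clear_all_softrefs);

  // ... marking runs and discovers Soft/Weak/Final/Phantom refs ...

  // process_discovered_references() then disables discovery, re-reads
  // the clock (it may have been changed via reflection or Unsafe in the
  // meantime), and applies the policy to the discovered lists.
}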
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -48,18 +48,177 @@
 // forward references
 class ReferencePolicy;
 class AbstractRefProcTaskExecutor;
-class DiscoveredList;
+
+// List of discovered references.
+class DiscoveredList {
+public:
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const     {
+     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+                                _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool   is_empty() const       { return head() == NULL; }
+  size_t length()               { return _len; }
+  void   set_length(size_t len) { _len = len;  }
+  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void   dec_length(size_t dec) { _len -= dec; }
+private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
+  size_t _len;
+};
+
+// Iterator for the list of discovered references.
+class DiscoveredListIterator {
+private:
+  DiscoveredList&    _refs_list;
+  HeapWord*          _prev_next;
+  oop                _prev;
+  oop                _ref;
+  HeapWord*          _discovered_addr;
+  oop                _next;
+  HeapWord*          _referent_addr;
+  oop                _referent;
+  OopClosure*        _keep_alive;
+  BoolObjectClosure* _is_alive;
+
+  DEBUG_ONLY(
+  oop                _first_seen; // cyclic linked list check
+  )
+
+  NOT_PRODUCT(
+  size_t             _processed;
+  size_t             _removed;
+  )
+
+public:
+  inline DiscoveredListIterator(DiscoveredList&    refs_list,
+                                OopClosure*        keep_alive,
+                                BoolObjectClosure* is_alive):
+    _refs_list(refs_list),
+    _prev_next(refs_list.adr_head()),
+    _prev(NULL),
+    _ref(refs_list.head()),
+#ifdef ASSERT
+    _first_seen(refs_list.head()),
+#endif
+#ifndef PRODUCT
+    _processed(0),
+    _removed(0),
+#endif
+    _next(NULL),
+    _keep_alive(keep_alive),
+    _is_alive(is_alive)
+{ }
+
+  // End Of List.
+  inline bool has_next() const { return _ref != NULL; }
+
+  // Get oop to the Reference object.
+  inline oop obj() const { return _ref; }
+
+  // Get oop to the referent object.
+  inline oop referent() const { return _referent; }
+
+  // Returns true if referent is alive.
+  inline bool is_referent_alive() const {
+    return _is_alive->do_object_b(_referent);
+  }
+
+  // Loads data for the current reference.
+  // The "allow_null_referent" argument tells us to allow for the possibility
+  // of a NULL referent in the discovered Reference object. This typically
+  // happens in the case of concurrent collectors that may have done the
+  // discovery concurrently, or interleaved, with mutator execution.
+  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
+
+  // Move to the next discovered reference.
+  inline void next() {
+    _prev_next = _discovered_addr;
+    _prev = _ref;
+    move_to_next();
+  }
+
+  // Remove the current reference from the list
+  void remove();
+
+  // Make the Reference object active again.
+  void make_active();
+
+  // Make the referent alive.
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
+
+  // Update the discovered field.
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
+        _keep_alive->do_oop((narrowOop*)_prev_next);
+      }
+    } else {
+      if (!oopDesc::is_null(*(oop*)_prev_next)) {
+        _keep_alive->do_oop((oop*)_prev_next);
+      }
+    }
+  }
+
+  // NULL out referent pointer.
+  void clear_referent();
+
+  // Statistics
+  NOT_PRODUCT(
+  inline size_t processed() const { return _processed; }
+  inline size_t removed() const   { return _removed; }
+  )
+
+  inline void move_to_next() {
+    if (_ref == _next) {
+      // End of the list.
+      _ref = NULL;
+    } else {
+      _ref = _next;
+    }
+    assert(_ref != _first_seen, "cyclic ref_list found");
+    NOT_PRODUCT(_processed++);
+  }
+};
 
 class ReferenceProcessor : public CHeapObj {
  protected:
   // Compatibility with pre-4965777 JDK's
   static bool _pending_list_uses_discovered_field;
-  MemRegion   _span; // (right-open) interval of heap
-                     // subject to wkref discovery
-  bool        _discovering_refs;      // true when discovery enabled
-  bool        _discovery_is_atomic;   // if discovery is atomic wrt
-                                      // other collectors in configuration
-  bool        _discovery_is_mt;       // true if reference discovery is MT.
+
+  // The SoftReference master timestamp clock
+  static jlong _soft_ref_timestamp_clock;
+
+  MemRegion   _span;                    // (right-open) interval of heap
+                                        // subject to wkref discovery
+
+  bool        _discovering_refs;        // true when discovery enabled
+  bool        _discovery_is_atomic;     // if discovery is atomic wrt
+                                        // other collectors in configuration
+  bool        _discovery_is_mt;         // true if reference discovery is MT.
+
   // If true, setting "next" field of a discovered refs list requires
   // write barrier(s).  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
@@ -67,18 +226,19 @@
   // long-term concurrent marking phase that does weak reference
   // discovery.)
   bool        _discovered_list_needs_barrier;
-  BarrierSet* _bs;                    // Cached copy of BarrierSet.
-  bool        _enqueuing_is_done;     // true if all weak references enqueued
-  bool        _processing_is_mt;      // true during phases when
-                                      // reference processing is MT.
-  int         _next_id;               // round-robin mod _num_q counter in
-                                      // support of work distribution
 
-  // For collectors that do not keep GC marking information
+  BarrierSet* _bs;                      // Cached copy of BarrierSet.
+  bool        _enqueuing_is_done;       // true if all weak references enqueued
+  bool        _processing_is_mt;        // true during phases when
+                                        // reference processing is MT.
+  int         _next_id;                 // round-robin mod _num_q counter in
+                                        // support of work distribution
+
+  // For collectors that do not keep GC liveness information
   // in the object header, this field holds a closure that
   // helps the reference processor determine the reachability
-  // of an oop (the field is currently initialized to NULL for
-  // all collectors but the CMS collector).
+  // of an oop. It is currently initialized to NULL for all
+  // collectors except for CMS and G1.
   BoolObjectClosure* _is_alive_non_header;
 
   // Soft ref clearing policies
@@ -102,10 +262,13 @@
   DiscoveredList* _discoveredPhantomRefs;
 
  public:
-  int num_q()                            { return _num_q; }
-  int max_num_q()                        { return _max_num_q; }
-  void set_active_mt_degree(int v)       { _num_q = v; }
-  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
+  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+
+  int num_q()                              { return _num_q; }
+  int max_num_q()                          { return _max_num_q; }
+  void set_active_mt_degree(int v)         { _num_q = v; }
+  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
+
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
@@ -205,6 +368,11 @@
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
  protected:
+  // Set the 'discovered' field of the given reference to
+  // the given value - emitting barriers depending upon
+  // the value of _discovered_list_needs_barrier.
+  void set_discovered(oop ref, oop value);
+
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.
@@ -290,7 +458,7 @@
   void      set_span(MemRegion span) { _span = span; }
 
   // start and stop weak ref discovery
-  void enable_discovery()   { _discovering_refs = true;  }
+  void enable_discovery(bool verify_disabled, bool check_no_refs);
   void disable_discovery()  { _discovering_refs = false; }
   bool discovery_enabled()  { return _discovering_refs;  }
 
@@ -365,7 +533,7 @@
 
   ~NoRefDiscovery() {
     if (_was_discovering_refs) {
-      _rp->enable_discovery();
+      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
     }
   }
 };
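[Editor's sketch] The now-exported DiscoveredListIterator is meant to be driven in the same shape as the process_phase*() loops in referenceProcessor.cpp. A condensed sketch of that traversal; the keep-or-drop decision here is a placeholder for the per-phase logic:

// Sketch of the canonical DiscoveredListIterator loop.
void scan_discovered_list(DiscoveredList&    refs_list,
                          OopClosure*        keep_alive,
                          BoolObjectClosure* is_alive) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    if (iter.referent() != NULL && iter.is_referent_alive()) {
      // Referent is still reachable: unlink the Reference from the
      // discovered list, keep the referent alive, reactivate the
      // Reference, and step past it.
      iter.remove();
      iter.make_referent_alive();
      iter.make_active();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
}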
--- a/hotspot/src/share/vm/runtime/thread.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -753,8 +753,9 @@
   jint thread_parity = _oops_do_parity;
   if (thread_parity != strong_roots_parity) {
     jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
-    if (res == thread_parity) return true;
-    else {
+    if (res == thread_parity) {
+      return true;
+    } else {
       guarantee(res == strong_roots_parity, "Or else what?");
       assert(SharedHeap::heap()->n_par_threads() > 0,
              "Should only fail when parallel.");
@@ -3909,8 +3910,9 @@
     }
   }
   VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(is_par, cp))
+  if (vmt->claim_oops_do(is_par, cp)) {
     vmt->oops_do(f, cf);
+  }
 }
 
 #ifndef SERIALGC
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -182,6 +182,7 @@
 #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
+#include "gc_implementation/g1/vmStructs_g1.hpp"
 #endif
 #ifdef COMPILER2
 #include "opto/addnode.hpp"
@@ -2878,6 +2879,9 @@
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_STATIC_VM_STRUCT_ENTRY)
+
+  VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+                GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
@@ -2921,6 +2925,9 @@
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 
   VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
+              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
 #endif // SERIALGC
 
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
@@ -3020,6 +3027,9 @@
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
              CHECK_STATIC_VM_STRUCT_ENTRY);
+
+  VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+                CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif // SERIALGC
 
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3060,6 +3070,9 @@
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 
   VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
+
+  VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
+              CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
 #endif // SERIALGC
 
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
@@ -3125,6 +3138,8 @@
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT));
+  debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, \
+                           ENSURE_FIELD_TYPE_PRESENT));
 #endif // SERIALGC
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
--- a/hotspot/src/share/vm/services/g1MemoryPool.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/services/g1MemoryPool.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -32,71 +32,44 @@
 G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                      const char* name,
                                      size_t init_size,
+                                     size_t max_size,
                                      bool support_usage_threshold) :
-  _g1h(g1h), CollectedMemoryPool(name,
-                                   MemoryPool::Heap,
-                                   init_size,
-                                   undefined_max(),
-                                   support_usage_threshold) {
+  _g1mm(g1h->g1mm()), CollectedMemoryPool(name,
+                                          MemoryPool::Heap,
+                                          init_size,
+                                          max_size,
+                                          support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
 
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
-  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->eden_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->survivor_space_used();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_committed();
-}
-
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
-  return g1h->g1mm()->old_space_used();
-}
-
 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Eden",
-                    eden_space_committed(g1h), /* init_size */
+                    "G1 Eden Space",
+                    g1h->g1mm()->eden_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = eden_space_committed(_g1h);
+  size_t committed  = _g1mm->eden_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
 
 G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
-                    "G1 Survivor",
-                    survivor_space_committed(g1h), /* init_size */
+                    "G1 Survivor Space",
+                    g1h->g1mm()->survivor_space_committed(), /* init_size */
+                    _undefined_max,
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = survivor_space_committed(_g1h);
+  size_t committed  = _g1mm->survivor_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
@@ -104,14 +77,15 @@
 G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
-                    old_space_committed(g1h), /* init_size */
+                    g1h->g1mm()->old_space_committed(), /* init_size */
+                    _undefined_max,
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
   size_t max_sz     = max_size();
   size_t used       = used_in_bytes();
-  size_t committed  = old_space_committed(_g1h);
+  size_t committed  = _g1mm->old_space_committed();
 
   return MemoryUsage(initial_sz, used, committed, max_sz);
 }
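[Editorial note] The g1MemoryPool.cpp rewrite above drops the family of static G1CollectedHeap-based helpers in favor of a G1MonitoringSupport* that the superclass caches at construction time; each pool then delegates its used/committed queries to that object. A hedged sketch of the resulting shape, using simplified hypothetical names (MonitoringSupport, PoolBase, EdenPool) rather than the real HotSpot classes:

#include <cstddef>

class MonitoringSupport {
  size_t _eden_used;
  size_t _eden_committed;
public:
  MonitoringSupport() : _eden_used(0), _eden_committed(0) {}
  size_t eden_space_used() const      { return _eden_used; }
  size_t eden_space_committed() const { return _eden_committed; }
};

class PoolBase {
protected:
  MonitoringSupport* _mm;   // cached once; every accessor delegates to it
  PoolBase(MonitoringSupport* mm, size_t /*init_size*/, size_t /*max_size*/)
    : _mm(mm) {}
};

class EdenPool : public PoolBase {
public:
  // No static helpers needed: the subclass reads the committed size straight
  // off the monitoring object before handing it to the base constructor.
  EdenPool(MonitoringSupport* mm)
    : PoolBase(mm, mm->eden_space_committed(), (size_t) -1) {}
  size_t used_in_bytes() { return _mm->eden_space_used(); }
};

int main() {
  MonitoringSupport mm;
  EdenPool eden(&mm);
  return eden.used_in_bytes() == 0 ? 0 : 1;
}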
--- a/hotspot/src/share/vm/services/g1MemoryPool.hpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/services/g1MemoryPool.hpp	Wed Jul 05 17:53:04 2017 +0200
@@ -26,12 +26,11 @@
 #define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
 
 #ifndef SERIALGC
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
 #endif
 
-class G1CollectedHeap;
-
 // This file contains the three classes that represent the memory
 // pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
 // G1OldGenPool. In G1, unlike our other GCs, we do not have a
@@ -50,37 +49,19 @@
 // on this model.
 //
 
-
 // This class is shared by the three G1 memory pool classes
-// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
-// calculate used / committed bytes for these three pools is related
-// (see comment above), we put the calculations in this class so that
-// we can easily share them among the subclasses.
+// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
 class G1MemoryPoolSuper : public CollectedMemoryPool {
 protected:
-  G1CollectedHeap* _g1h;
+  const static size_t _undefined_max = (size_t) -1;
+  G1MonitoringSupport* _g1mm;
 
   // Would only be called from subclasses.
   G1MemoryPoolSuper(G1CollectedHeap* g1h,
                     const char* name,
                     size_t init_size,
+                    size_t max_size,
                     bool support_usage_threshold);
-
-  // The reason why all the code is in static methods is so that it
-  // can be safely called from the constructors of the subclasses.
-
-  static size_t undefined_max() {
-    return (size_t) -1;
-  }
-
-  static size_t eden_space_committed(G1CollectedHeap* g1h);
-  static size_t eden_space_used(G1CollectedHeap* g1h);
-
-  static size_t survivor_space_committed(G1CollectedHeap* g1h);
-  static size_t survivor_space_used(G1CollectedHeap* g1h);
-
-  static size_t old_space_committed(G1CollectedHeap* g1h);
-  static size_t old_space_used(G1CollectedHeap* g1h);
 };
 
 // Memory pool that represents the G1 eden.
@@ -89,10 +70,10 @@
   G1EdenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return eden_space_used(_g1h);
+    return _g1mm->eden_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -103,10 +84,10 @@
   G1SurvivorPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return survivor_space_used(_g1h);
+    return _g1mm->survivor_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
@@ -117,10 +98,10 @@
   G1OldGenPool(G1CollectedHeap* g1h);
 
   size_t used_in_bytes() {
-    return old_space_used(_g1h);
+    return _g1mm->old_space_used();
   }
   size_t max_size() const {
-    return undefined_max();
+    return _undefined_max;
   }
   MemoryUsage get_memory_usage();
 };
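[Editorial note] A small detail in the header is worth calling out: the undefined_max() static method becomes the class constant _undefined_max = (size_t) -1. The all-ones value appears to follow the java.lang.management convention in which a maximum of -1 means "undefined", and as a static const integral member it remains usable from the subclasses' constructor initializer lists. A trivial sketch, with Pool as a hypothetical stand-in:

#include <cstddef>
#include <cstdio>

struct Pool {
  static const size_t _undefined_max = (size_t) -1;   // all bits set
  size_t max_size() const { return _undefined_max; }
};

const size_t Pool::_undefined_max;   // out-of-class definition for odr-uses

int main() {
  Pool p;
  if (p.max_size() == Pool::_undefined_max) {
    std::printf("max size: undefined\n");
  }
  return 0;
}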
--- a/hotspot/src/share/vm/utilities/quickSort.cpp	Mon Oct 17 19:06:53 2011 -0700
+++ b/hotspot/src/share/vm/utilities/quickSort.cpp	Wed Jul 05 17:53:04 2017 +0200
@@ -54,16 +54,18 @@
   return 1;
 }
 
-static int test_stdlib_comparator(const void* a, const void* b) {
-  int ai = *(int*)a;
-  int bi = *(int*)b;
-  if (ai == bi) {
-    return 0;
+extern "C" {
+  static int test_stdlib_comparator(const void* a, const void* b) {
+    int ai = *(int*)a;
+    int bi = *(int*)b;
+    if (ai == bi) {
+      return 0;
+    }
+    if (ai < bi) {
+      return -1;
+    }
+    return 1;
   }
-  if (ai < bi) {
-    return -1;
-  }
-  return 1;
 }
 
 void QuickSort::print_array(const char* prefix, int* array, int length) {
@@ -92,7 +94,6 @@
 }
 
 bool QuickSort::test_quick_sort() {
-#if 0
   tty->print_cr("test_quick_sort\n");
   {
     int* test_array = NULL;
@@ -213,7 +214,6 @@
     delete[] test_array;
     delete[] expected_array;
   }
-#endif
   return true;
 }
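[Editorial note] Two independent fixes land in quickSort.cpp above. First, the stdlib comparator is wrapped in an extern "C" block: ::qsort formally takes a pointer to a function with C language linkage, and stricter toolchains treat C- and C++-linkage function pointers as distinct types, so the wrap resolves that mismatch (keeping static inside the block preserves internal linkage). Second, deleting the #if 0 / #endif pair re-enables the body of test_quick_sort, which had been compiled out. A hedged standalone sketch of the linkage point, with compare_ints as a made-up name:

#include <cstdlib>
#include <cstdio>

extern "C" {
  // C language linkage plus internal linkage, the same combination the
  // patch applies to test_stdlib_comparator.
  static int compare_ints(const void* a, const void* b) {
    int ai = *(const int*) a;
    int bi = *(const int*) b;
    return (ai > bi) - (ai < bi);   // -1, 0, or 1 without overflow
  }
}

int main() {
  int data[] = { 3, 1, 2 };
  std::qsort(data, 3, sizeof(int), compare_ints);
  std::printf("%d %d %d\n", data[0], data[1], data[2]);   // prints: 1 2 3
  return 0;
}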