--- a/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c Tue Sep 09 16:14:40 2014 +0200
@@ -64,7 +64,10 @@
#define THROW_NEW_DEBUGGER_EXCEPTION(str) { throw_new_debugger_exception(env, str); return;}
void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
- (*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
+ jclass clazz;
+ clazz = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
+ CHECK_EXCEPTION;
+ (*env)->ThrowNew(env, clazz, errMsg);
}
struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
@@ -149,11 +152,14 @@
const char* name;
jobject loadObject;
jobject loadObjectList;
+ jstring str;
base = get_lib_base(ph, i);
name = get_lib_name(ph, i);
- loadObject = (*env)->CallObjectMethod(env, this_obj, createLoadObject_ID,
- (*env)->NewStringUTF(env, name), (jlong)0, (jlong)base);
+
+ str = (*env)->NewStringUTF(env, name);
+ CHECK_EXCEPTION;
+ loadObject = (*env)->CallObjectMethod(env, this_obj, createLoadObject_ID, str, (jlong)0, (jlong)base);
CHECK_EXCEPTION;
loadObjectList = (*env)->GetObjectField(env, this_obj, loadObjectList_ID);
CHECK_EXCEPTION;
@@ -298,13 +304,18 @@
JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_lookupByAddress0
(JNIEnv *env, jobject this_obj, jlong addr) {
uintptr_t offset;
+ jobject obj;
+ jstring str;
const char* sym = NULL;
struct ps_prochandle* ph = get_proc_handle(env, this_obj);
sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
if (sym == NULL) return 0;
- return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
- (*env)->NewStringUTF(env, sym), (jlong)offset);
+ str = (*env)->NewStringUTF(env, sym);
+ CHECK_EXCEPTION_(NULL);
+ obj = (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID, str, (jlong)offset);
+ CHECK_EXCEPTION_(NULL);
+ return obj;
}
/*
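Note: the fixes above all follow one pattern — a JNI call that can leave an exception pending (FindClass, NewStringUTF, CallObjectMethod) is hoisted into its own statement and followed by a CHECK_EXCEPTION / CHECK_EXCEPTION_(value) macro before its result is consumed. A minimal sketch of that shape, using C++ JNI syntax and a hypothetical stand-in for the macro (the file above uses the equivalent C form (*env)->Fn(env, ...)):

    #include <jni.h>

    // Hypothetical stand-in for the CHECK_EXCEPTION_(v) macro assumed above:
    // return early while a JNI exception is pending rather than issuing
    // further JNI calls with one outstanding.
    #define CHECK_EXCEPTION_(value) \
      if (env->ExceptionOccurred()) { return (value); }

    static jobject lookup_symbol(JNIEnv* env, jobject this_obj,
                                 jmethodID create_symbol_id,
                                 const char* sym, jlong offset) {
      // NewStringUTF can fail (e.g. OutOfMemoryError) and leave an
      // exception pending, so its result is checked before use ...
      jstring str = env->NewStringUTF(sym);
      CHECK_EXCEPTION_(NULL);
      // ... and the upcall itself can throw, so check again before returning.
      jobject obj = env->CallObjectMethod(this_obj, create_symbol_id,
                                          str, offset);
      CHECK_EXCEPTION_(NULL);
      return obj;
    }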
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -90,7 +90,9 @@
*/
static void throwNewDebuggerException(JNIEnv* env, const char* errMsg) {
- env->ThrowNew(env->FindClass("sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
+ jclass clazz = env->FindClass("sun/jvm/hotspot/debugger/DebuggerException");
+ CHECK_EXCEPTION;
+ env->ThrowNew(clazz, errMsg);
}
// JNI ids for some fields, methods
@@ -962,6 +964,7 @@
CHECK_EXCEPTION_(0);
jboolean isCopy;
jlong* ptr = env->GetLongArrayElements(res, &isCopy);
+ CHECK_EXCEPTION_(NULL);
for (int i = 0; i < NPRGREG; i++) {
ptr[i] = (jlong) (uintptr_t) gregs[i];
}
@@ -1253,6 +1256,7 @@
(JNIEnv *env, jobject this_object, jstring name) {
jboolean isCopy;
const char* ptr = env->GetStringUTFChars(name, &isCopy);
+ CHECK_EXCEPTION_(NULL);
char buf[2*SYMBOL_BUF_SIZE + 1];
jstring res = 0;
if (cplus_demangle((char*) ptr, buf, sizeof(buf)) != DEMANGLE_ESPACE) {
@@ -1439,7 +1443,9 @@
"createClosestSymbol", "(Ljava/lang/String;J)Lsun/jvm/hotspot/debugger/cdbg/ClosestSymbol;");
CHECK_EXCEPTION;
- listAdd_ID = env->GetMethodID(env->FindClass("java/util/List"), "add", "(Ljava/lang/Object;)Z");
+ jclass list_clazz = env->FindClass("java/util/List");
+ CHECK_EXCEPTION;
+ listAdd_ID = env->GetMethodID(list_clazz, "add", "(Ljava/lang/Object;)Z");
CHECK_EXCEPTION;
// part of the class sharing workaround
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Tue Sep 09 16:14:40 2014 +0200
@@ -43,8 +43,8 @@
// Mirror class for G1CollectedHeap.
public class G1CollectedHeap extends SharedHeap {
- // HeapRegionSeq _seq;
- static private long hrsFieldOffset;
+ // HeapRegionManager _hrm;
+ static private long hrmFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
@@ -67,7 +67,7 @@
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1CollectedHeap");
- hrsFieldOffset = type.getField("_hrs").getOffset();
+ hrmFieldOffset = type.getField("_hrm").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -75,7 +75,7 @@
}
public long capacity() {
- return hrs().capacity();
+ return hrm().capacity();
}
public long used() {
@@ -83,13 +83,13 @@
}
public long n_regions() {
- return hrs().length();
+ return hrm().length();
}
- private HeapRegionSeq hrs() {
- Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
- return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
- hrsAddr);
+ private HeapRegionManager hrm() {
+ Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
+ return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
+ hrmAddr);
}
public G1MonitoringSupport g1mm() {
@@ -110,7 +110,7 @@
}
private Iterator<HeapRegion> heapRegionIterator() {
- return hrs().heapRegionIterator();
+ return hrm().heapRegionIterator();
}
public void heapRegionIterate(SpaceClosure scl) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionManager.java Tue Sep 09 16:14:40 2014 +0200
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionManager.
+
+public class HeapRegionManager extends VMObject {
+ // G1HeapRegionTable _regions
+ static private long regionsFieldOffset;
+ // uint _num_committed
+ static private CIntegerField numCommittedField;
+
+ static {
+ VM.registerVMInitializedObserver(new Observer() {
+ public void update(Observable o, Object data) {
+ initialize(VM.getVM().getTypeDataBase());
+ }
+ });
+ }
+
+ static private synchronized void initialize(TypeDataBase db) {
+ Type type = db.lookupType("HeapRegionManager");
+
+ regionsFieldOffset = type.getField("_regions").getOffset();
+ numCommittedField = type.getCIntegerField("_num_committed");
+ }
+
+ private G1HeapRegionTable regions() {
+ Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
+ return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
+ regionsAddr);
+ }
+
+ public long capacity() {
+ return length() * HeapRegion.grainBytes();
+ }
+
+ public long length() {
+ return regions().length();
+ }
+
+ public long committedLength() {
+ return numCommittedField.getValue(addr);
+ }
+
+ public Iterator<HeapRegion> heapRegionIterator() {
+ return regions().heapRegionIterator(length());
+ }
+
+ public HeapRegionManager(Address addr) {
+ super(addr);
+ }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Mon Sep 08 16:05:48 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc_implementation.g1;
-
-import java.util.Iterator;
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.runtime.VMObjectFactory;
-import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
-
-public class HeapRegionSeq extends VMObject {
- // G1HeapRegionTable _regions
- static private long regionsFieldOffset;
- // uint _committed_length
- static private CIntegerField numCommittedField;
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- static private synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("HeapRegionSeq");
-
- regionsFieldOffset = type.getField("_regions").getOffset();
- numCommittedField = type.getCIntegerField("_num_committed");
- }
-
- private G1HeapRegionTable regions() {
- Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
- return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
- regionsAddr);
- }
-
- public long capacity() {
- return length() * HeapRegion.grainBytes();
- }
-
- public long length() {
- return regions().length();
- }
-
- public long committedLength() {
- return numCommittedField.getValue(addr);
- }
-
- public Iterator<HeapRegion> heapRegionIterator() {
- return regions().heapRegionIterator(length());
- }
-
- public HeapRegionSeq(Address addr) {
- super(addr);
- }
-}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,6 @@
higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
lowerDimension = new MetadataField(type.getAddressField("_lower_dimension"), 0);
vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
- componentMirror = new OopField(type.getOopField("_component_mirror"), 0);
javaLangCloneableName = null;
javaLangObjectName = null;
javaIoSerializableName = null;
@@ -63,7 +62,6 @@
private static MetadataField higherDimension;
private static MetadataField lowerDimension;
private static CIntField vtableLen;
- private static OopField componentMirror;
public Klass getJavaSuper() {
SystemDictionary sysDict = VM.getVM().getSystemDictionary();
@@ -74,7 +72,6 @@
public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
public Klass getLowerDimension() { return (Klass) lowerDimension.getValue(this); }
public long getVtableLen() { return vtableLen.getValue(this); }
- public Oop getComponentMirror() { return componentMirror.getValue(this); }
// constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
// Initialized lazily to avoid initialization ordering dependencies between ArrayKlass and SymbolTable
@@ -144,6 +141,5 @@
visitor.doMetadata(higherDimension, true);
visitor.doMetadata(lowerDimension, true);
visitor.doCInt(vtableLen, true);
- visitor.doOop(componentMirror, true);
}
}
--- a/hotspot/make/aix/makefiles/mapfile-vers-debug Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/aix/makefiles/mapfile-vers-debug Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/aix/makefiles/mapfile-vers-product Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/aix/makefiles/mapfile-vers-product Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/bsd/makefiles/gcc.make Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/bsd/makefiles/gcc.make Tue Sep 09 16:14:40 2014 +0200
@@ -325,6 +325,10 @@
else ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 1), 1)
OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/unsafe.o += -O1
+ # Clang 6.0
+ else ifeq ($(shell expr $(CC_VER_MAJOR) = 6 \& $(CC_VER_MINOR) = 0), 1)
+ OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+ OPT_CFLAGS/unsafe.o += -O1
else
$(error "Update compiler workarounds for Clang $(CC_VER_MAJOR).$(CC_VER_MINOR)")
endif
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -123,7 +123,6 @@
_JVM_GetClassSignature
_JVM_GetClassSigners
_JVM_GetClassTypeAnnotations
- _JVM_GetComponentType
_JVM_GetDeclaredClasses
_JVM_GetDeclaringClass
_JVM_GetEnclosingMethodInfo
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -123,7 +123,6 @@
_JVM_GetClassSignature
_JVM_GetClassSigners
_JVM_GetClassTypeAnnotations
- _JVM_GetComponentType
_JVM_GetDeclaredClasses
_JVM_GetDeclaringClass
_JVM_GetEnclosingMethodInfo
--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-product Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-product Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/linux/makefiles/mapfile-vers-debug Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/linux/makefiles/mapfile-vers-debug Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/linux/makefiles/mapfile-vers-product Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/linux/makefiles/mapfile-vers-product Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
- JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
--- a/hotspot/make/solaris/makefiles/mapfile-vers Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/solaris/makefiles/mapfile-vers Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,6 @@
JVM_GetClassNameUTF;
JVM_GetClassSignature;
JVM_GetClassSigners;
- JVM_GetComponentType;
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
--- a/hotspot/make/solaris/makefiles/sparcWorks.make Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,9 @@
VALIDATED_COMPILER_REVS := 5.8
VALIDATED_CC_COMPILER_REVS := 5.8
else
- # Validated compiler for JDK7 is SS12 update 1 + patches (5.10)
- VALIDATED_COMPILER_REVS := 5.10
- VALIDATED_CC_COMPILER_REVS := 5.10
+ # Validated compiler for JDK9 is SS12.3 (5.12)
+ VALIDATED_COMPILER_REVS := 5.12
+ VALIDATED_CC_COMPILER_REVS := 5.12
endif
# Warning messages about not using the above validated versions
--- a/hotspot/make/windows/makefiles/vm.make Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/make/windows/makefiles/vm.make Tue Sep 09 16:14:40 2014 +0200
@@ -34,6 +34,9 @@
CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
!else
CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
+!if "$(BUILDARCH)" == "amd64"
+CXX_FLAGS=$(CXX_FLAGS) /homeparams
+!endif
!endif
!if "$(Variant)" == "compiler1"
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -1678,14 +1678,9 @@
dlclose(handle);
#elif defined(__APPLE__)
- uint32_t count;
- uint32_t i;
-
- count = _dyld_image_count();
- for (i = 1; i < count; i++) {
- const char *name = _dyld_get_image_name(i);
- intptr_t slide = _dyld_get_image_vmaddr_slide(i);
- st->print_cr(PTR_FORMAT " \t%s", slide, name);
+ for (uint32_t i = 1; i < _dyld_image_count(); i++) {
+ st->print_cr(PTR_FORMAT " \t%s", _dyld_get_image_header(i),
+ _dyld_get_image_name(i));
}
#else
st->print_cr("Error: Cannot print dynamic libraries.");
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -135,11 +135,6 @@
if (ForceTimeHighResolution)
timeEndPeriod(1L);
- // Workaround for issue when a custom launcher doesn't call
- // DestroyJavaVM and NMT is trying to track memory when free is
- // called from a static destructor
- MemTracker::shutdown();
-
break;
default:
break;
@@ -414,6 +409,8 @@
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
+extern jint volatile vm_getting_terminated;
+
// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
// Try to randomize the cache line index of hot stack frames.
@@ -435,9 +432,17 @@
}
}
+ // Diagnostic code to investigate JDK-6573254 (Part I)
+ unsigned res = 90115; // non-java thread
+ if (thread->is_Java_thread()) {
+ JavaThread* java_thread = (JavaThread*)thread;
+ res = java_lang_Thread::is_daemon(java_thread->threadObj())
+ ? 70115 // java daemon thread
+ : 80115; // java non-daemon thread
+ }
// Install a win32 structured exception handler around every thread created
- // by VM, so VM can genrate error dump when an exception occurred in non-
+ // by VM, so VM can generate error dump when an exception occurred in non-
// Java thread (e.g. VM thread).
__try {
thread->run();
@@ -453,6 +458,11 @@
Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
}
+ // Diagnostic code to investigate JDK-6573254 (Part II)
+ if (OrderAccess::load_acquire(&vm_getting_terminated)) {
+ return res;
+ }
+
return 0;
}
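Note: the diagnostic hunks above give each VM-created thread a distinguishable exit code (90115 non-Java, 70115 Java daemon, 80115 Java non-daemon) that is only returned once the VM has begun terminating, so a dump from a hung shutdown reveals what kind of thread was still running. A sketch of the assumed other half of the handshake — the shutdown path publishing vm_getting_terminated with release semantics to pair with the load_acquire above (the function name here is hypothetical):

    jint volatile vm_getting_terminated = 0;

    // Hypothetical shutdown hook; in HotSpot this store would live on
    // the VM's termination path.
    void note_vm_getting_terminated() {
      // release_store pairs with the OrderAccess::load_acquire() in
      // java_start(), so a thread that sees the flag set also sees all
      // writes the terminating thread made before setting it.
      OrderAccess::release_store(&vm_getting_terminated, 1);
    }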
--- a/hotspot/src/share/tools/ProjectCreator/BuildConfig.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/BuildConfig.java Tue Sep 09 16:14:40 2014 +0200
@@ -504,7 +504,7 @@
super.init(includes, defines);
- getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag()));
+ getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag(), get("PlatformName")));
getV("LinkerFlags").addAll(getCI().getDebugLinkerFlags());
}
}
@@ -619,7 +619,7 @@
abstract class CompilerInterface {
abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
abstract Vector getBaseLinkerFlags(String outDir, String outDll, String platformName);
- abstract Vector getDebugCompilerFlags(String opt);
+ abstract Vector getDebugCompilerFlags(String opt, String platformName);
abstract Vector getDebugLinkerFlags();
abstract void getAdditionalNonKernelLinkerFlags(Vector rv);
abstract Vector getProductCompilerFlags();
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Tue Sep 09 16:14:40 2014 +0200
@@ -357,7 +357,7 @@
}
@Override
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
// Set /On option
@@ -369,6 +369,10 @@
addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL");
// Set /Oy- option
addAttr(rv, "OmitFramePointers", "false");
+ // Set /homeparams for x64 debug builds
+ if (platformName.equals("x64")) {
+ addAttr(rv, "AdditionalOptions", "/homeparams");
+ }
return rv;
}
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Tue Sep 09 16:14:40 2014 +0200
@@ -284,7 +284,7 @@
}
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt, rv);
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Tue Sep 09 16:14:40 2014 +0200
@@ -48,7 +48,7 @@
}
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt,rv);
--- a/hotspot/src/share/vm/ci/ciArrayKlass.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciArrayKlass.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,6 @@
ciType* base_element_type(); // JLS calls this the "element type"
bool is_leaf_type(); // No subtypes of this array type.
- ciInstance* component_mirror() {
- // This is a real field in ArrayKlass, but we derive it from element_type.
- return element_type()->java_mirror();
- }
-
// What kind of vmObject is this?
bool is_array_klass() const { return true; }
bool is_java_klass() const { return true; }
--- a/hotspot/src/share/vm/classfile/classFileStream.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classFileStream.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
}
-ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
+ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
_buffer_start = buffer;
_buffer_end = buffer + length;
_current = buffer;
--- a/hotspot/src/share/vm/classfile/classFileStream.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classFileStream.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,20 +39,20 @@
u1* _buffer_start; // Buffer bottom
u1* _buffer_end; // Buffer top (one past last element)
u1* _current; // Current buffer position
- char* _source; // Source of stream (directory name, ZIP/JAR archive name)
+ const char* _source; // Source of stream (directory name, ZIP/JAR archive name)
bool _need_verify; // True if verification is on for the class file
void truncated_file_error(TRAPS);
public:
// Constructor
- ClassFileStream(u1* buffer, int length, char* source);
+ ClassFileStream(u1* buffer, int length, const char* source);
// Buffer access
u1* buffer() const { return _buffer_start; }
int length() const { return _buffer_end - _buffer_start; }
u1* current() const { return _current; }
void set_current(u1* pos) { _current = pos; }
- char* source() const { return _source; }
+ const char* source() const { return _source; }
void set_verify(bool flag) { _need_verify = flag; }
void check_truncated_file(bool b, TRAPS) {
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -189,9 +189,10 @@
return false;
}
-ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
- _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
- strcpy(_dir, dir);
+ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() {
+ char* copy = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
+ strcpy(copy, dir);
+ _dir = copy;
}
@@ -235,8 +236,9 @@
ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
_zip = zip;
- _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
- strcpy(_zip_name, zip_name);
+ char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
+ strcpy(copy, zip_name);
+ _zip_name = copy;
}
ClassPathZipEntry::~ClassPathZipEntry() {
@@ -304,7 +306,7 @@
}
}
-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
@@ -314,7 +316,7 @@
}
LazyClassPathEntry::~LazyClassPathEntry() {
- os::free(_path);
+ os::free((void*)_path);
}
bool LazyClassPathEntry::is_jar_file() {
@@ -563,17 +565,19 @@
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
- char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
- if (!PrintSharedArchiveAndExit) {
+ const char* sys_class_path = Arguments::get_sysclasspath();
+ if (PrintSharedArchiveAndExit) {
+ // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
+ // the same as the bootcp of the shared archive.
+ } else {
trace_class_path("[Bootstrap loader class path=", sys_class_path);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
- _shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
+ _shared_paths_misc_info->add_boot_classpath(sys_class_path);
}
#endif
setup_search_path(sys_class_path);
- os::free(sys_class_path);
}
#if INCLUDE_CDS
@@ -593,7 +597,7 @@
}
#endif
-void ClassLoader::setup_search_path(char *class_path) {
+void ClassLoader::setup_search_path(const char *class_path) {
int offset = 0;
int len = (int)strlen(class_path);
int end = 0;
@@ -620,7 +624,7 @@
}
}
-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
+ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st,
bool lazy, bool throw_exception, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
@@ -687,11 +691,8 @@
struct stat st;
if (os::stat(path, &st) == 0) {
if ((st.st_mode & S_IFREG) == S_IFREG) {
- char orig_path[JVM_MAXPATHLEN];
char canonical_path[JVM_MAXPATHLEN];
-
- strcpy(orig_path, path);
- if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
+ if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
char* error_msg = NULL;
jzfile* zip;
{
@@ -737,7 +738,7 @@
}
// Returns true IFF the file/dir exists and the entry was successfully created.
-bool ClassLoader::update_class_path_entry_list(char *path,
+bool ClassLoader::update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool throw_exception) {
struct stat st;
@@ -762,8 +763,8 @@
if (DumpSharedSpaces) {
_shared_paths_misc_info->add_nonexist_path(path);
}
+#endif
return false;
-#endif
}
}
@@ -1269,11 +1270,17 @@
}
-bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
+bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) {
assert(orig != NULL && out != NULL && len > 0, "bad arguments");
if (CanonicalizeEntry != NULL) {
- JNIEnv* env = JavaThread::current()->jni_environment();
- if ((CanonicalizeEntry)(env, os::native_path(orig), out, len) < 0) {
+ JavaThread* THREAD = JavaThread::current();
+ JNIEnv* env = THREAD->jni_environment();
+ ResourceMark rm(THREAD);
+
+ // os::native_path writes into orig_copy
+ char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1);
+ strcpy(orig_copy, orig);
+ if ((CanonicalizeEntry)(env, os::native_path(orig_copy), out, len) < 0) {
return false;
}
} else {
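Note: get_canonical_path() now accepts a const char*, but os::native_path() rewrites its argument in place, so the path is first copied into a writable, resource-allocated scratch buffer. The same copy-before-mutate shape, sketched with a plain heap buffer and a hypothetical canonicalize_impl() standing in for the CanonicalizeEntry call:

    #include <stdlib.h>
    #include <string.h>

    char* native_path(char* path);                            // mutates in place
    int canonicalize_impl(char* native, char* out, int len);  // hypothetical

    bool get_canonical(const char* orig, char* out, int len) {
      // 'orig' is const, so hand native_path() a private writable copy.
      char* copy = (char*) malloc(strlen(orig) + 1);
      if (copy == NULL) return false;
      strcpy(copy, orig);
      bool ok = canonicalize_impl(native_path(copy), out, len) >= 0;
      free(copy);
      return ok;
    }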
--- a/hotspot/src/share/vm/classfile/classLoader.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -72,11 +72,11 @@
class ClassPathDirEntry: public ClassPathEntry {
private:
- char* _dir; // Name of directory
+ const char* _dir; // Name of directory
public:
bool is_jar_file() { return false; }
const char* name() { return _dir; }
- ClassPathDirEntry(char* dir);
+ ClassPathDirEntry(const char* dir);
ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -100,8 +100,8 @@
class ClassPathZipEntry: public ClassPathEntry {
private:
- jzfile* _zip; // The zip archive
- char* _zip_name; // Name of zip archive
+ jzfile* _zip; // The zip archive
+ const char* _zip_name; // Name of zip archive
public:
bool is_jar_file() { return true; }
const char* name() { return _zip_name; }
@@ -119,7 +119,7 @@
// For lazier loading of boot class path entries
class LazyClassPathEntry: public ClassPathEntry {
private:
- char* _path; // dir or file
+ const char* _path; // dir or file
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
@@ -129,7 +129,7 @@
public:
bool is_jar_file();
const char* name() { return _path; }
- LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
+ LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception);
virtual ~LazyClassPathEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@@ -216,17 +216,17 @@
static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
int start_index);
static void setup_bootstrap_search_path();
- static void setup_search_path(char *class_path);
+ static void setup_search_path(const char *class_path);
static void load_zip_library();
- static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
+ static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
bool lazy, bool throw_exception, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
- static bool get_canonical_path(char* orig, char* out, int len);
+ static bool get_canonical_path(const char* orig, char* out, int len);
public:
- static bool update_class_path_entry_list(char *path,
+ static bool update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool throw_exception=true);
static void print_bootclasspath();
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -332,27 +332,6 @@
}
}
-#ifdef ASSERT
-class AllAliveClosure : public OopClosure {
- BoolObjectClosure* _is_alive_closure;
- bool _found_dead;
- public:
- AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
- template <typename T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (!_is_alive_closure->do_object_b(obj)) {
- _found_dead = true;
- }
- }
- }
- void do_oop(oop* p) { do_oop_work<oop>(p); }
- void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
- bool found_dead() { return _found_dead; }
-};
-#endif
-
oop ClassLoaderData::keep_alive_object() const {
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -362,15 +341,6 @@
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| is_alive_closure->do_object_b(keep_alive_object());
-#ifdef ASSERT
- if (alive) {
- AllAliveClosure all_alive_closure(is_alive_closure);
- KlassToOopClosure klass_closure(&all_alive_closure);
- const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
- assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
- }
-#endif
-
return alive;
}
--- a/hotspot/src/share/vm/classfile/classLoaderExt.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderExt.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -59,8 +59,8 @@
};
- static void add_class_path_entry(char* path, bool check_for_duplicates,
- ClassPathEntry* new_entry) {
+ static void add_class_path_entry(const char* path, bool check_for_duplicates,
+ ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
static void setup_search_paths() {}
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -620,7 +620,6 @@
// Two-way link between the array klass and its component mirror:
// (array_klass) k -> mirror -> component_mirror -> array_klass -> k
set_component_mirror(mirror(), comp_mirror());
- ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
set_array_klass(comp_mirror(), k());
} else {
assert(k->oop_is_instance(), "Must be");
@@ -682,10 +681,9 @@
}
void java_lang_Class::set_component_mirror(oop java_class, oop comp_mirror) {
- if (_component_mirror_offset != 0) {
+ assert(_component_mirror_offset != 0, "must be set");
java_class->obj_field_put(_component_mirror_offset, comp_mirror);
}
-}
oop java_lang_Class::component_mirror(oop java_class) {
assert(_component_mirror_offset != 0, "must be set");
return java_class->obj_field(_component_mirror_offset);
@@ -875,22 +873,27 @@
assert(!offsets_computed, "offsets should be initialized only once");
offsets_computed = true;
- Klass* klass_oop = SystemDictionary::Class_klass();
+ Klass* k = SystemDictionary::Class_klass();
// The classRedefinedCount field is only present starting in 1.5,
// so don't go fatal.
compute_optional_offset(classRedefinedCount_offset,
- klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
+ k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
// Needs to be optional because the old build runs Queens during bootstrapping
// and jdk8-9 doesn't have coordinated pushes yet.
compute_optional_offset(_class_loader_offset,
- klass_oop, vmSymbols::classLoader_name(),
+ k, vmSymbols::classLoader_name(),
vmSymbols::classloader_signature());
- compute_optional_offset(_component_mirror_offset,
- klass_oop, vmSymbols::componentType_name(),
+ compute_offset(_component_mirror_offset,
+ k, vmSymbols::componentType_name(),
vmSymbols::class_signature());
+ // Init lock is a C union with component_mirror. Only instanceKlass mirrors have
+ // init_lock and only ArrayKlass mirrors have component_mirror. Since both are oops
+ // GC treats them the same.
+ _init_lock_offset = _component_mirror_offset;
+
CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -222,7 +222,6 @@
macro(java_lang_Class, oop_size, int_signature, false) \
macro(java_lang_Class, static_oop_field_count, int_signature, false) \
macro(java_lang_Class, protection_domain, object_signature, false) \
- macro(java_lang_Class, init_lock, object_signature, false) \
macro(java_lang_Class, signers, object_signature, false)
class java_lang_Class : AllStatic {
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -139,7 +139,7 @@
if (timestamp != st.st_mtime) {
return fail("Timestamp mismatch");
}
- if (filesize != st.st_size) {
+ if (filesize != st.st_size) {
return fail("File size mismatch");
}
}
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -165,7 +165,7 @@
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
- out->print("Expecting that file %s must exist and not altered", path);
+ out->print("Expecting that file %s must exist and is not altered", path);
break;
default:
ShouldNotReachHere();
--- a/hotspot/src/share/vm/classfile/stringTable.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -109,7 +109,7 @@
}
}
// If the bucket size is too deep check if this hash code is insufficient.
- if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+ if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
--- a/hotspot/src/share/vm/classfile/stringTable.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/stringTable.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -28,7 +28,7 @@
#include "memory/allocation.inline.hpp"
#include "utilities/hashtable.hpp"
-class StringTable : public Hashtable<oop, mtSymbol> {
+class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs;
friend class Symbol;
@@ -55,11 +55,11 @@
// in the range [start_idx, end_idx).
static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
- StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
+ StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
sizeof (HashtableEntry<oop, mtSymbol>)) {}
StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
- : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
+ : RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
number_of_entries) {}
public:
// The string table
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -201,7 +201,7 @@
}
}
// If the bucket size is too deep check if this hash code is insufficient.
- if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+ if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -73,7 +73,7 @@
operator Symbol*() { return _temp; }
};
-class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
+class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
friend class VMStructs;
friend class ClassFileParser;
@@ -109,10 +109,10 @@
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
- : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
+ : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
- : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
+ : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
// Arena for permanent symbols (null class loader) that are never unloaded
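Note: with StringTable and SymbolTable now deriving from RehashableHashtable, rehash_count and needs_rehashing() are inherited members, which is why the lookup-side checks above drop the BasicHashtable<mtSymbol>:: qualification. The trigger logic those checks preserve, sketched with assumed member names for a RehashableHashtable-like base:

    template <class T> class RehashableTable {
     protected:
      static const int rehash_count = 100;   // assumed threshold
      bool _needs_rehashing;
      bool needs_rehashing() const { return _needs_rehashing; }

      // 'count' is the chain length observed while walking one bucket
      // during lookup. An unusually deep bucket suggests the current
      // hash seed is being defeated (by accident or by crafted keys),
      // so a rehash with a fresh seed is requested once; the actual
      // rebuild happens later, at a safepoint.
      void check_bucket_depth(int count) {
        if (count >= rehash_count && !needs_rehashing()) {
          _needs_rehashing = true;
        }
      }
    };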
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -1243,7 +1243,6 @@
tty->print_cr("]");
}
-#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
@@ -1252,7 +1251,6 @@
classlist_file->flush();
}
}
-#endif
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
@@ -1260,7 +1258,7 @@
}
return ik;
}
-#endif
+#endif // INCLUDE_CDS
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -399,7 +399,6 @@
template(oop_size_name, "oop_size") \
template(static_oop_field_count_name, "static_oop_field_count") \
template(protection_domain_name, "protection_domain") \
- template(init_lock_name, "init_lock") \
template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
@@ -747,8 +746,6 @@
do_name( isPrimitive_name, "isPrimitive") \
do_intrinsic(_getSuperclass, java_lang_Class, getSuperclass_name, void_class_signature, F_RN) \
do_name( getSuperclass_name, "getSuperclass") \
- do_intrinsic(_getComponentType, java_lang_Class, getComponentType_name, void_class_signature, F_RN) \
- do_name( getComponentType_name, "getComponentType") \
\
do_intrinsic(_getClassAccessFlags, sun_reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN) \
do_name( getClassAccessFlags_name, "getClassAccessFlags") \
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -328,9 +328,11 @@
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
const char* gen_name = "old";
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation Counters - generation 1, 1 subspace
- _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+ _gen_counters = new GenerationCounters(gen_name, 1, 1,
+ gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
_space_counters = new GSpaceCounters(gen_name, 0,
_virtual_space.reserved_size(),
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -34,8 +34,8 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
@@ -434,10 +434,6 @@
}
}
-bool ConcurrentMark::not_yet_marked(oop obj) const {
- return _g1h->is_obj_ill(obj);
-}
-
CMRootRegions::CMRootRegions() :
_young_list(NULL), _cm(NULL), _scan_in_progress(false),
_should_abort(false), _next_survivor(NULL) { }
@@ -892,7 +888,16 @@
}
virtual bool doHeapRegion(HeapRegion* r) {
- return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+ // This closure can be called concurrently to the mutator, so we must make sure
+ // that the result of the getNextMarkedWordAddress() call is compared to the
+ // value passed to it as limit to detect any found bits.
+ // We can use the region's orig_end() for the limit and the comparison value
+ // as it always contains the "real" end of the region that never changes and
+ // has no side effects.
+ // Due to the latter, there can also be no problem with the compiler generating
+ // reloads of the orig_end() call.
+ HeapWord* end = r->orig_end();
+ return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
}
};
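Note: the comment added above is worth restating — the closure runs while mutators may still be allocating, so the search limit must be loaded once and that same value reused in the comparison; orig_end() is immutable for the life of the region, so even compiler-generated reloads are harmless. Schematically (names as in the hunk above):

    // BAD: if the limit can move concurrently (as r->end() can while a
    // mutator allocates), the two reads may disagree and the comparison
    // can spuriously report a marked bit.
    bool has_marked_bits_racy(CMBitMapRO* bm, HeapRegion* r) {
      return bm->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
    }

    // GOOD: read one stable bound once and compare against that same
    // value; orig_end() never changes after the region is created.
    bool has_marked_bits(CMBitMapRO* bm, HeapRegion* r) {
      HeapWord* end = r->orig_end();
      return bm->getNextMarkedWordAddress(r->bottom(), end) != end;
    }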
@@ -1117,20 +1122,17 @@
if (!_cm->has_aborted()) {
do {
double start_vtime_sec = os::elapsedVTime();
- double start_time_sec = os::elapsedTime();
double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
the_task->do_marking_step(mark_step_duration_ms,
true /* do_termination */,
false /* is_serial*/);
- double end_time_sec = os::elapsedTime();
double end_vtime_sec = os::elapsedVTime();
double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
- double elapsed_time_sec = end_time_sec - start_time_sec;
_cm->clear_has_overflown();
- bool ret = _cm->do_yield_check(worker_id);
+ _cm->do_yield_check(worker_id);
jlong sleep_time_ms;
if (!_cm->has_aborted() && the_task->has_aborted()) {
@@ -1140,17 +1142,6 @@
os::sleep(Thread::current(), sleep_time_ms, false);
SuspendibleThreadSet::join();
}
- double end_time2_sec = os::elapsedTime();
- double elapsed_time2_sec = end_time2_sec - start_time_sec;
-
-#if 0
- gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
- "overhead %1.4lf",
- elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
- the_task->conc_overhead(os::elapsedTime()) * 8.0);
- gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
- elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
-#endif
} while (!_cm->has_aborted() && the_task->has_aborted());
}
the_task->record_end_time();
@@ -1409,7 +1400,7 @@
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
- BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+ BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_at_put(index, true);
@@ -1597,7 +1588,7 @@
if (_verbose) {
gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
"expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
- hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+ hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
}
failures += 1;
}
@@ -1606,7 +1597,7 @@
// (which was just calculated) region bit maps.
// We're not OK if the bit in the calculated expected region
// bitmap is set and the bit in the actual region bitmap is not.
- BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+ BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
bool expected = _exp_region_bm->at(index);
bool actual = _region_bm->at(index);
@@ -1614,7 +1605,7 @@
if (_verbose) {
gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
"expected: %s, actual: %s",
- hr->hrs_index(),
+ hr->hrm_index(),
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
@@ -1635,7 +1626,7 @@
if (_verbose) {
gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
"expected: %s, actual: %s",
- hr->hrs_index(), i,
+ hr->hrm_index(), i,
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
@@ -2949,11 +2940,6 @@
_nextMarkBitMap->clearRange(mr);
}
-void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
- clearRangePrevBitmap(mr);
- clearRangeNextBitmap(mr);
-}
-
HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
// "checkpoint" the finger
@@ -3256,7 +3242,7 @@
assert(limit_idx <= end_idx, "or else use atomics");
// Aggregate the "stripe" in the count data associated with hr.
- uint hrs_index = hr->hrs_index();
+ uint hrm_index = hr->hrm_index();
size_t marked_bytes = 0;
for (uint i = 0; i < _max_worker_id; i += 1) {
@@ -3265,7 +3251,7 @@
// Fetch the marked_bytes in this region for task i and
// add it to the running total for this region.
- marked_bytes += marked_bytes_array[hrs_index];
+ marked_bytes += marked_bytes_array[hrm_index];
// Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
// into the global card bitmap.
@@ -3499,17 +3485,6 @@
}
}
-bool ConcurrentMark::containing_card_is_marked(void* p) {
- size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
- return _card_bm.at(offset >> CardTableModRefBS::card_shift);
-}
-
-bool ConcurrentMark::containing_cards_are_marked(void* start,
- void* last) {
- return containing_card_is_marked(start) &&
- containing_card_is_marked(last);
-}
-
#ifndef PRODUCT
// for debugging purposes
void ConcurrentMark::print_finger() {
@@ -3762,7 +3737,7 @@
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
- "scanned = %d%s, refs reached = %d%s",
+ "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
_worker_id, last_interval_ms,
_words_scanned,
(_words_scanned >= _words_scanned_limit) ? " (*)" : "",
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -683,7 +683,9 @@
return _task_queues->steal(worker_id, hash_seed, obj);
}
- ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
+ ConcurrentMark(G1CollectedHeap* g1h,
+ G1RegionToSpaceMapper* prev_bitmap_storage,
+ G1RegionToSpaceMapper* next_bitmap_storage);
~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -712,8 +714,10 @@
// inconsistent) and always passing the size. hr is the region that
// contains the object and it's passed optionally from callers who
// might already have it (no point in recalculating it).
- inline void grayRoot(oop obj, size_t word_size,
- uint worker_id, HeapRegion* hr = NULL);
+ inline void grayRoot(oop obj,
+ size_t word_size,
+ uint worker_id,
+ HeapRegion* hr = NULL);
// It iterates over the heap and for each object it comes across it
// will dump the contents of its reference fields, as well as
@@ -734,7 +738,8 @@
// AND MARKED : indicates that an object is both explicitly and
// implicitly live (it should be one or the other, not both)
void print_reachable(const char* str,
- VerifyOption vo, bool all) PRODUCT_RETURN;
+ VerifyOption vo,
+ bool all) PRODUCT_RETURN;
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
@@ -771,12 +776,11 @@
// this carefully!
inline void markPrev(oop p);
- // Clears marks for all objects in the given range, for the prev,
- // next, or both bitmaps. NB: the previous bitmap is usually
+ // Clears marks for all objects in the given range, for the prev or
+ // next bitmaps. NB: the previous bitmap is usually
// read-only, so use this carefully!
void clearRangePrevBitmap(MemRegion mr);
void clearRangeNextBitmap(MemRegion mr);
- void clearRangeBothBitmaps(MemRegion mr);
// Notify data structures that a GC has started.
void note_start_of_gc() {
@@ -798,21 +802,6 @@
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
- bool isMarked(oop p) const {
- assert(p != NULL && p->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)p;
- assert(addr >= _nextMarkBitMap->startWord() ||
- addr < _nextMarkBitMap->endWord(), "in a region");
-
- return _nextMarkBitMap->isMarked(addr);
- }
-
- inline bool not_yet_marked(oop p) const;
-
- // XXX Debug code
- bool containing_card_is_marked(void* p);
- bool containing_cards_are_marked(void* start, void* last);
-
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
@@ -898,7 +887,8 @@
// marked_bytes array slot for the given HeapRegion.
// Sets the bits in the given card bitmap that are associated with the
// cards that are spanned by the memory region.
- inline void count_region(MemRegion mr, HeapRegion* hr,
+ inline void count_region(MemRegion mr,
+ HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
@@ -906,56 +896,27 @@
// data structures for the given worker id.
inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
- // Counts the given memory region in the task/worker counting
- // data structures for the given worker id.
- inline void count_region(MemRegion mr, uint worker_id);
-
// Counts the given object in the given task/worker counting
// data structures.
- inline void count_object(oop obj, HeapRegion* hr,
+ inline void count_object(oop obj,
+ HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
- // Counts the given object in the task/worker counting data
- // structures for the given worker id.
- inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
-
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
- inline bool par_mark_and_count(oop obj, HeapRegion* hr,
+ inline bool par_mark_and_count(oop obj,
+ HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
- inline bool par_mark_and_count(oop obj, size_t word_size,
- HeapRegion* hr, uint worker_id);
-
- // Attempts to mark the given object and, if successful, counts
- // the object in the task/worker counting structures for the
- // given worker id.
- inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
-
- // Similar to the above routine but we don't know the heap region that
- // contains the object to be marked/counted, which this routine looks up.
- inline bool par_mark_and_count(oop obj, uint worker_id);
-
- // Similar to the above routine but there are times when we cannot
- // safely calculate the size of obj due to races and we, therefore,
- // pass the size in as a parameter. It is the caller's responsibility
- // to ensure that the size passed in for obj is valid.
- inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
-
- // Unconditionally mark the given object, and unconditionally count
- // the object in the counting structures for worker id 0.
- // Should *not* be called from parallel code.
- inline bool mark_and_count(oop obj, HeapRegion* hr);
-
- // Similar to the above routine but we don't know the heap region that
- // contains the object to be marked/counted, which this routine looks up.
- // Should *not* be called from parallel code.
- inline bool mark_and_count(oop obj);
+ inline bool par_mark_and_count(oop obj,
+ size_t word_size,
+ HeapRegion* hr,
+ uint worker_id);
// Returns true if initialization was successfully completed.
bool completed_initialization() const {
@@ -1227,9 +1188,12 @@
_finger = new_finger;
}
- CMTask(uint worker_id, ConcurrentMark *cm,
- size_t* marked_bytes, BitMap* card_bm,
- CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
+ CMTask(uint worker_id,
+ ConcurrentMark *cm,
+ size_t* marked_bytes,
+ BitMap* card_bm,
+ CMTaskQueue* task_queue,
+ CMTaskQueueSet* task_queues);
// it prints statistics associated with this task
void print_stats();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -86,7 +86,7 @@
HeapWord* start = mr.start();
HeapWord* end = mr.end();
size_t region_size_bytes = mr.byte_size();
- uint index = hr->hrs_index();
+ uint index = hr->hrm_index();
assert(!hr->continuesHumongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity");
@@ -125,14 +125,6 @@
count_region(mr, hr, marked_bytes_array, task_card_bm);
}
-// Counts the given memory region, which may be a single object, in the
-// task/worker counting data structures for the given worker id.
-inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
- HeapWord* addr = mr.start();
- HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
- count_region(mr, hr, worker_id);
-}
-
// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
HeapRegion* hr,
@@ -142,17 +134,6 @@
count_region(mr, hr, marked_bytes_array, task_card_bm);
}
-// Counts the given object in the task/worker counting data
-// structures for the given worker id.
-inline void ConcurrentMark::count_object(oop obj,
- HeapRegion* hr,
- uint worker_id) {
- size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
- BitMap* task_card_bm = count_card_bitmap_for(worker_id);
- HeapWord* addr = (HeapWord*) obj;
- count_object(obj, hr, marked_bytes_array, task_card_bm);
-}
-
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
@@ -184,63 +165,6 @@
return false;
}
-// Attempts to mark the given object and, if successful, counts
-// the object in the task/worker counting structures for the
-// given worker id.
-inline bool ConcurrentMark::par_mark_and_count(oop obj,
- HeapRegion* hr,
- uint worker_id) {
- HeapWord* addr = (HeapWord*)obj;
- if (_nextMarkBitMap->parMark(addr)) {
- // Update the task specific count data for the object.
- count_object(obj, hr, worker_id);
- return true;
- }
- return false;
-}
-
-// As above - but we don't know the heap region containing the
-// object and so have to supply it.
-inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
- HeapWord* addr = (HeapWord*)obj;
- HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
- return par_mark_and_count(obj, hr, worker_id);
-}
-
-// Similar to the above routine but we already know the size, in words, of
-// the object that we wish to mark/count
-inline bool ConcurrentMark::par_mark_and_count(oop obj,
- size_t word_size,
- uint worker_id) {
- HeapWord* addr = (HeapWord*)obj;
- if (_nextMarkBitMap->parMark(addr)) {
- // Update the task specific count data for the object.
- MemRegion mr(addr, word_size);
- count_region(mr, worker_id);
- return true;
- }
- return false;
-}
-
-// Unconditionally mark the given object, and unconditionally count
-// the object in the counting structures for worker id 0.
-// Should *not* be called from parallel code.
-inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
- HeapWord* addr = (HeapWord*)obj;
- _nextMarkBitMap->mark(addr);
- // Update the task specific count data for the object.
- count_object(obj, hr, 0 /* worker_id */);
- return true;
-}
-
-// As above - but we don't have the heap region containing the
-// object, so we have to supply it.
-inline bool ConcurrentMark::mark_and_count(oop obj) {
- HeapWord* addr = (HeapWord*)obj;
- HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
- return mark_and_count(obj, hr);
-}
-
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
HeapWord* start_addr = MAX2(startWord(), mr.start());
HeapWord* end_addr = MIN2(endWord(), mr.end());
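// For context on the surviving par_mark_and_count() above: parMark() is, to
// the best of our reading, a compare-and-swap bit set (BitMap::par_set_bit),
// so exactly one worker wins the mark and does the counting against its own
// per-worker array. A minimal sketch of that pattern, with illustrative types:
#include <atomic>
#include <cstdint>
#include <cstddef>

struct ParBitMap {
  std::atomic<uintptr_t>* _map;       // one 64-bit word holds 64 mark bits
  bool parMark(size_t bit) {
    std::atomic<uintptr_t>& word = _map[bit / 64];
    uintptr_t mask = uintptr_t(1) << (bit % 64);
    uintptr_t old = word.load(std::memory_order_relaxed);
    while ((old & mask) == 0) {
      if (word.compare_exchange_weak(old, old | mask)) {
        return true;                  // this thread set the bit
      }
    }
    return false;                     // already marked by another worker
  }
};

// Usage mirroring par_mark_and_count(): only the winning worker updates its
// private marked-bytes slot, so the counting side needs no locking.
bool par_mark_and_count(ParBitMap& bm, size_t bit, size_t word_size,
                        size_t* marked_bytes_for_this_worker) {
  if (bm.parMark(bit)) {
    *marked_bytes_for_this_worker += word_size * sizeof(void*);
    return true;
  }
  return false;
}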
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -30,14 +30,7 @@
#include "runtime/java.hpp"
#include "services/memTracker.hpp"
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
- // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
- // retrieve it here since this would cause firing of several asserts. The code
- // executed after commit of a region already needs to do some re-initialization of
- // the HeapRegion, so we combine that.
-}
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
@@ -59,10 +52,10 @@
if (TraceBlockOffsetTable) {
gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
gclog_or_tty->print_cr(" "
- " rs.base(): " INTPTR_FORMAT
- " rs.size(): " INTPTR_FORMAT
- " rs end(): " INTPTR_FORMAT,
- bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
+ " rs.base(): " PTR_FORMAT
+ " rs.size(): " SIZE_FORMAT
+ " rs end(): " PTR_FORMAT,
+ p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
}
}
@@ -72,26 +65,16 @@
return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}
-void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
- set_offset_array(index_for(left), index_for(right -1), offset);
-}
-
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
- MemRegion mr, bool init_to_zero) :
+ MemRegion mr) :
G1BlockOffsetTable(mr.start(), mr.end()),
_unallocated_block(_bottom),
- _array(array), _gsp(NULL),
- _init_to_zero(init_to_zero) {
+ _array(array), _gsp(NULL) {
assert(_bottom <= _end, "arguments out of order");
- if (!_init_to_zero) {
- // initialize cards to point back to mr.start()
- set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
- _array->set_offset_array(0, 0); // set first card to 0
- }
}
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
@@ -181,93 +164,6 @@
DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// right-open interval: [blk_start, blk_end)
-void
-G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
- mark_block(blk_start, blk_end);
- allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two.
-void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
- size_t left_blk_size) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk, blk_size);
- // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
- // is one single block.
- mark_block(blk + left_blk_size, blk + blk_size);
-}
-
-
-// Action_mark - update the BOT for the block [blk_start, blk_end).
-// Current typical use is for splitting a block.
-// Action_single - update the BOT for an allocation.
-// Action_verify - BOT verification.
-void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
- HeapWord* blk_end,
- Action action) {
- assert(Universe::heap()->is_in_reserved(blk_start),
- "reference must be into the heap");
- assert(Universe::heap()->is_in_reserved(blk_end-1),
- "limit must be within the heap");
- // This is optimized to make the test fast, assuming we only rarely
- // cross boundaries.
- uintptr_t end_ui = (uintptr_t)(blk_end - 1);
- uintptr_t start_ui = (uintptr_t)blk_start;
- // Calculate the last card boundary preceding end of blk
- intptr_t boundary_before_end = (intptr_t)end_ui;
- clear_bits(boundary_before_end, right_n_bits(LogN));
- if (start_ui <= (uintptr_t)boundary_before_end) {
- // blk starts at or crosses a boundary
- // Calculate index of card on which blk begins
- size_t start_index = _array->index_for(blk_start);
- // Index of card on which blk ends
- size_t end_index = _array->index_for(blk_end - 1);
- // Start address of card on which blk begins
- HeapWord* boundary = _array->address_for_index(start_index);
- assert(boundary <= blk_start, "blk should start at or after boundary");
- if (blk_start != boundary) {
- // blk starts strictly after boundary
- // adjust card boundary and start_index forward to next card
- boundary += N_words;
- start_index++;
- }
- assert(start_index <= end_index, "monotonicity of index_for()");
- assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
- switch (action) {
- case Action_mark: {
- if (init_to_zero()) {
- _array->set_offset_array(start_index, boundary, blk_start);
- break;
- } // Else fall through to the next case
- }
- case Action_single: {
- _array->set_offset_array(start_index, boundary, blk_start);
- // We have finished marking the "offset card". We need to now
- // mark the subsequent cards that this blk spans.
- if (start_index < end_index) {
- HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
- HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
- set_remainder_to_point_to_start(rem_st, rem_end);
- }
- break;
- }
- case Action_check: {
- _array->check_offset_array(start_index, boundary, blk_start);
- // We have finished checking the "offset card". We need to now
- // check the subsequent cards that this blk spans.
- check_all_cards(start_index + 1, end_index);
- break;
- }
- default:
- ShouldNotReachHere();
- }
- }
-}
-
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
@@ -306,25 +202,6 @@
}
}
-// The range [blk_start, blk_end) represents a single contiguous block
-// of storage; modify the block offset table to represent this
-// information; Right-open interval: [blk_start, blk_end)
-// NOTE: this method does _not_ adjust _unallocated_block.
-void
-G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
- do_block_internal(blk_start, blk_end, Action_single);
-}
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
- do_block_internal(blk_start, blk_end, Action_mark);
-}
-
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
@@ -381,7 +258,7 @@
assert(next_boundary <= _array->_end,
err_msg("next_boundary is beyond the end of the covered region "
" next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
- next_boundary, _array->_end));
+ p2i(next_boundary), p2i(_array->_end)));
if (addr >= gsp()->top()) return gsp()->top();
while (next_boundary < addr) {
while (n <= next_boundary) {
@@ -397,57 +274,13 @@
return forward_to_block_containing_addr_const(q, n, addr);
}
-HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
- assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
- assert(_bottom <= addr && addr < _end,
- "addr must be covered by this Array");
- // Must read this exactly once because it can be modified by parallel
- // allocation.
- HeapWord* ub = _unallocated_block;
- if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
- assert(ub < _end, "tautology (see above)");
- return ub;
- }
-
- // Otherwise, find the block start using the table, but taking
- // care (cf block_start_unsafe() above) not to parse any objects/blocks
- // on the cards themselves.
- size_t index = _array->index_for(addr);
- assert(_array->address_for_index(index) == addr,
- "arg should be start of card");
-
- HeapWord* q = (HeapWord*)addr;
- uint offset;
- do {
- offset = _array->offset_array(index--);
- q -= offset;
- } while (offset == N_words);
- assert(q <= addr, "block start should be to left of arg");
- return q;
-}
-
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
HeapWord* new_end = _bottom + new_word_size;
- if (_end < new_end && !init_to_zero()) {
- // verify that the old and new boundaries are also card boundaries
- assert(_array->is_card_boundary(_end),
- "_end not a card boundary");
- assert(_array->is_card_boundary(new_end),
- "new _end would not be a card boundary");
- // set all the newly added cards
- _array->set_offset_array(_end, new_end, N_words);
- }
_end = new_end; // update _end
}
-void G1BlockOffsetArray::set_region(MemRegion mr) {
- _bottom = mr.start();
- _end = mr.end();
-}
-
//
// threshold_
// | _index_
@@ -522,7 +355,7 @@
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
(uint)_array->offset_array(orig_index),
- blk_start, boundary));
+ p2i(blk_start), p2i(boundary)));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
@@ -556,9 +389,9 @@
"card addr: "PTR_FORMAT" BOT entry: %u "
"obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
"cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
- block_start, card, card_addr,
+ p2i(block_start), card, p2i(card_addr),
_array->offset_array(card),
- obj_start, word_size, first_card, last_card);
+ p2i(obj_start), word_size, first_card, last_card);
return false;
}
}
@@ -572,10 +405,10 @@
size_t to_index = _array->index_for(_end);
out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
"cards ["SIZE_FORMAT","SIZE_FORMAT")",
- _bottom, _end, from_index, to_index);
+ p2i(_bottom), p2i(_end), from_index, to_index);
for (size_t i = from_index; i < to_index; ++i) {
out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
- i, _array->address_for_index(i),
+ i, p2i(_array->address_for_index(i)),
(uint) _array->offset_array(i));
}
}
@@ -606,7 +439,7 @@
G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
MemRegion mr) :
- G1BlockOffsetArray(array, mr, true)
+ G1BlockOffsetArray(array, mr)
{
_next_offset_threshold = NULL;
_next_offset_index = 0;
@@ -641,15 +474,6 @@
return _next_offset_threshold;
}
-void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
- assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
- "just checking");
- size_t bottom_index = _array->index_for(_bottom);
- assert(_array->address_for_index(bottom_index) == _bottom,
- "Precondition of call");
- _array->set_offset_array(bottom_index, 0);
-}
-
void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
assert(new_top <= _end, "_end should have already been updated");
@@ -663,7 +487,7 @@
void
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
G1BlockOffsetArray::print_on(out);
- out->print_cr(" next offset threshold: "PTR_FORMAT, _next_offset_threshold);
+ out->print_cr(" next offset threshold: "PTR_FORMAT, p2i(_next_offset_threshold));
out->print_cr(" next offset index: "SIZE_FORMAT, _next_offset_index);
}
#endif // !PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -109,7 +109,12 @@
class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
- virtual void on_commit(uint start_idx, size_t num_regions);
+ virtual void on_commit(uint start_idx, size_t num_regions) {
+ // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+ // retrieve it here since this would cause firing of several asserts. The code
+ // executed after commit of a region already needs to do some re-initialization of
+ // the HeapRegion, so we combine that.
+ }
};
// This implementation of "G1BlockOffsetTable" divides the covered region
@@ -153,8 +158,6 @@
// For performance these have to devolve to array accesses in product builds.
inline u_char offset_array(size_t index) const;
- void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
-
void set_offset_array_raw(size_t index, u_char offset) {
_offset_array[index] = offset;
}
@@ -165,8 +168,6 @@
inline void set_offset_array(size_t left, size_t right, u_char offset);
- inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
-
bool is_card_boundary(HeapWord* p) const;
public:
@@ -193,8 +194,6 @@
// G1BlockOffsetTable(s) to initialize cards.
G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
- void set_bottom(HeapWord* new_bottom);
-
// Return the appropriate index into "_offset_array" for "p".
inline size_t index_for(const void* p) const;
inline size_t index_for_raw(const void* p) const;
@@ -220,14 +219,6 @@
LogN = G1BlockOffsetSharedArray::LogN
};
- // The following enums are used by do_block_helper
- enum Action {
- Action_single, // BOT records a single block (see single_block())
- Action_mark, // BOT marks the start of a block (see mark_block())
- Action_check // Check that BOT records block correctly
- // (see verify_single_block()).
- };
-
// This is the array, which can be shared by several BlockOffsetArray's
// servicing different
G1BlockOffsetSharedArray* _array;
@@ -235,10 +226,6 @@
// The space that owns this subregion.
G1OffsetTableContigSpace* _gsp;
- // If true, array entries are initialized to 0; otherwise, they are
- // initialized to point backwards to the beginning of the covered region.
- bool _init_to_zero;
-
// The portion [_unallocated_block, _sp.end()) of the space that
// is a single block known not to contain any objects.
// NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
@@ -253,9 +240,6 @@
// that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end);
- // A helper function for BOT adjustment/verification work
- void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
-
protected:
G1OffsetTableContigSpace* gsp() const { return _gsp; }
@@ -303,11 +287,9 @@
public:
// The space may not have it's bottom and top set yet, which is why the
- // region is passed as a parameter. If "init_to_zero" is true, the
- // elements of the array are initialized to zero. Otherwise, they are
- // initialized to point backwards to the beginning.
- G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
- bool init_to_zero);
+ // region is passed as a parameter. The elements of the array are
+ // initialized to zero.
+ G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
// Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for
@@ -315,114 +297,19 @@
// This would be legal C++, but MS VC++ doesn't allow it.
void set_space(G1OffsetTableContigSpace* sp);
- // Resets the covered region to the given "mr".
- void set_region(MemRegion mr);
-
// Resets the covered region to one with the same _bottom as before but
// the "new_word_size".
void resize(size_t new_word_size);
- // These must be guaranteed to work properly (i.e., do nothing)
- // when "blk_start" ("blk" for second version) is "NULL".
- virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
- virtual void alloc_block(HeapWord* blk, size_t size) {
- alloc_block(blk, blk + size);
- }
-
- // The following methods are useful and optimized for a
- // general, non-contiguous space.
-
- // Given a block [blk_start, blk_start + full_blk_size), and
- // a left_blk_size < full_blk_size, adjust the BOT to show two
- // blocks [blk_start, blk_start + left_blk_size) and
- // [blk_start + left_blk_size, blk_start + full_blk_size).
- // It is assumed (and verified in the non-product VM) that the
- // BOT was correct for the original block.
- void split_block(HeapWord* blk_start, size_t full_blk_size,
- size_t left_blk_size);
-
- // Adjust the BOT to show that it has a single block in the
- // range [blk_start, blk_start + size). All necessary BOT
- // cards are adjusted, but _unallocated_block isn't.
- void single_block(HeapWord* blk_start, HeapWord* blk_end);
- void single_block(HeapWord* blk, size_t size) {
- single_block(blk, blk + size);
- }
-
- // Adjust BOT to show that it has a block in the range
- // [blk_start, blk_start + size). Only the first card
- // of BOT is touched. It is assumed (and verified in the
- // non-product VM) that the remaining cards of the block
- // are correct.
- void mark_block(HeapWord* blk_start, HeapWord* blk_end);
- void mark_block(HeapWord* blk, size_t size) {
- mark_block(blk, blk + size);
- }
-
- // Adjust _unallocated_block to indicate that a particular
- // block has been newly allocated or freed. It is assumed (and
- // verified in the non-product VM) that the BOT is correct for
- // the given block.
- inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk_start, blk_end);
- if (BlockOffsetArrayUseUnallocatedBlock) {
- _unallocated_block = MAX2(_unallocated_block, blk_end);
- }
- }
-
- inline void allocated(HeapWord* blk, size_t size) {
- allocated(blk, blk + size);
- }
-
- inline void freed(HeapWord* blk_start, HeapWord* blk_end);
-
- inline void freed(HeapWord* blk, size_t size);
-
virtual HeapWord* block_start_unsafe(const void* addr);
virtual HeapWord* block_start_unsafe_const(const void* addr) const;
- // Requires "addr" to be the start of a card and returns the
- // start of the block that contains the given address.
- HeapWord* block_start_careful(const void* addr) const;
-
- // If true, initialize array slots with no allocated blocks to zero.
- // Otherwise, make them point back to the front.
- bool init_to_zero() { return _init_to_zero; }
-
- // Verification & debugging - ensure that the offset table reflects the fact
- // that the block [blk_start, blk_end) or [blk, blk + size) is a
- // single block of storage. NOTE: can;t const this because of
- // call to non-const do_block_internal() below.
- inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
- if (VerifyBlockOffsetArray) {
- do_block_internal(blk_start, blk_end, Action_check);
- }
- }
-
- inline void verify_single_block(HeapWord* blk, size_t size) {
- verify_single_block(blk, blk + size);
- }
-
// Used by region verification. Checks that the contents of the
// BOT reflect that there's a single object that spans the address
// range [obj_start, obj_start + word_size); returns true if this is
// the case, returns false if it's not.
bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
- // Verify that the given block is before _unallocated_block
- inline void verify_not_unallocated(HeapWord* blk_start,
- HeapWord* blk_end) const {
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(blk_start < blk_end, "Block inconsistency?");
- assert(blk_end <= _unallocated_block, "_unallocated_block problem");
- }
- }
-
- inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
- verify_not_unallocated(blk, blk + size);
- }
-
void check_all_cards(size_t left_card, size_t right_card) const;
virtual void print_on(outputStream* out) PRODUCT_RETURN;
@@ -445,14 +332,12 @@
blk_start, blk_end);
}
- // Variant of zero_bottom_entry that does not check for availability of the
+ // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
- // Zero out the entry for _bottom (offset will be zero).
- void zero_bottom_entry();
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -91,13 +91,6 @@
}
}
-void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
- check_index(index, "index out of range");
- assert(high >= low, "addresses out of order");
- check_offset(pointer_delta(high, low), "offset too large");
- assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-}
-
// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
@@ -193,28 +186,4 @@
return q;
}
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
- // Verify that the BOT shows [blk_start, blk_end) to be one block.
- verify_single_block(blk_start, blk_end);
- // adjust _unallocated_block upward or downward
- // as appropriate
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(_unallocated_block <= _end,
- "Inconsistent value for _unallocated_block");
- if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
- // CMS-specific note: a block abutting _unallocated_block to
- // its left is being freed, a new block is being added or
- // we are resetting following a compaction
- _unallocated_block = blk_start;
- }
- }
-}
-
-inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
- freed(blk, blk + size);
-}
-
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
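// For context on index_for_raw() above: the shared BOT maps each 2^LogN-byte
// card of the covered region to one byte of _offset_array. A minimal sketch of
// the two index mappings; the 512-byte card size (LogN == 9) is assumed here
// for illustration:
#include <cstddef>

const unsigned LogN = 9;                       // 512-byte cards (assumption)

size_t index_for(const void* p, const char* reserved_start) {
  // byte offset into the covered region, shifted down to a card index
  return (size_t)((const char*)p - reserved_start) >> LogN;
}

const char* address_for_index(size_t index, const char* reserved_start) {
  // inverse mapping: first byte covered by the given card
  return reserved_start + (index << LogN);
}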
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -22,372 +22,375 @@
*
*/
-
#include "precompiled.hpp"
+#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/heap.hpp"
#include "memory/iterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stack.inline.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
- _top = bottom();
+class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+ friend class G1CodeRootSetTest;
+ typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+ static CodeRootSetTable* volatile _purge_list;
+
+ CodeRootSetTable* _purge_next;
+
+ unsigned int compute_hash(nmethod* nm) {
+ uintptr_t hash = (uintptr_t)nm;
+ return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
+ }
+
+ Entry* new_entry(nmethod* nm);
+
+ public:
+ CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+ ~CodeRootSetTable();
+
+ // Needs to be protected by locks
+ bool add(nmethod* nm);
+ bool remove(nmethod* nm);
+
+ // Can be called without locking
+ bool contains(nmethod* nm);
+
+ int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+ void copy_to(CodeRootSetTable* new_table);
+ void nmethods_do(CodeBlobClosure* blk);
+
+ template<typename CB>
+ void remove_if(CB& should_remove);
+
+ static void purge_list_append(CodeRootSetTable* tbl);
+ static void purge();
+
+ static size_t static_mem_size() {
+ return sizeof(_purge_list);
+ }
+};
+
+CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+
+CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+ unsigned int hash = compute_hash(nm);
+ Entry* entry = (Entry*) new_entry_free_list();
+ if (entry == NULL) {
+ entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
+ }
+ entry->set_next(NULL);
+ entry->set_hash(hash);
+ entry->set_literal(nm);
+ return entry;
}
-void G1CodeRootChunk::reset() {
- _next = _prev = NULL;
- _free = NULL;
- _top = bottom();
-}
-
-void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
- NmethodOrLink* cur = bottom();
- while (cur != _top) {
- if (is_nmethod(cur)) {
- cl->do_code_blob(cur->_nmethod);
+CodeRootSetTable::~CodeRootSetTable() {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; ) {
+ Entry* to_remove = e;
+ // read next before freeing.
+ e = e->next();
+ unlink_entry(to_remove);
+ FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
}
- cur++;
+ }
+ assert(number_of_entries() == 0, "should have removed all entries");
+ free_buckets();
+ for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
+ FREE_C_HEAP_ARRAY(char, e, mtGC);
}
}
-bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
- NmethodOrLink* cur = bottom();
-
- for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
- if (cur->_nmethod == method) {
- bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
+bool CodeRootSetTable::add(nmethod* nm) {
+ if (!contains(nm)) {
+ Entry* e = new_entry(nm);
+ int index = hash_to_index(e->hash());
+ add_entry(index, e);
+ return true;
+ }
+ return false;
+}
- if (!result) {
- // Someone else cleared out this entry.
- return false;
- }
+bool CodeRootSetTable::contains(nmethod* nm) {
+ int index = hash_to_index(compute_hash(nm));
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ if (e->literal() == nm) {
+ return true;
+ }
+ }
+ return false;
+}
- // The method was cleared. Time to link it into the free list.
- NmethodOrLink* prev_free;
- do {
- prev_free = (NmethodOrLink*)_free;
- cur->_link = prev_free;
- } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
-
+bool CodeRootSetTable::remove(nmethod* nm) {
+ int index = hash_to_index(compute_hash(nm));
+ Entry* previous = NULL;
+ for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
+ if (e->literal() == nm) {
+ if (previous != NULL) {
+ previous->set_next(e->next());
+ } else {
+ set_entry(index, e->next());
+ }
+ free_entry(e);
return true;
}
}
-
return false;
}
-G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
- _free_list.initialize();
- _free_list.set_size(G1CodeRootChunk::word_size());
+void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ new_table->add(e->literal());
+ }
+ }
+ new_table->copy_freelist(this);
}
-size_t G1CodeRootChunkManager::fl_mem_size() {
- return _free_list.count() * _free_list.size();
-}
-
-void G1CodeRootChunkManager::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
- _num_chunks_handed_out -= list->count();
- _free_list.prepend(list);
+void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ blk->do_code_blob(e->literal());
+ }
+ }
}
-void G1CodeRootChunkManager::free_chunk(G1CodeRootChunk* chunk) {
- _free_list.return_chunk_at_head(chunk);
- _num_chunks_handed_out--;
+template<typename CB>
+void CodeRootSetTable::remove_if(CB& should_remove) {
+ for (int index = 0; index < table_size(); ++index) {
+ Entry* previous = NULL;
+ Entry* e = bucket(index);
+ while (e != NULL) {
+ Entry* next = e->next();
+ if (should_remove(e->literal())) {
+ if (previous != NULL) {
+ previous->set_next(next);
+ } else {
+ set_entry(index, next);
+ }
+ free_entry(e);
+ } else {
+ previous = e;
+ }
+ e = next;
+ }
+ }
+}
+
+G1CodeRootSet::~G1CodeRootSet() {
+ delete _table;
}
-void G1CodeRootChunkManager::purge_chunks(size_t keep_ratio) {
- size_t keep = _num_chunks_handed_out * keep_ratio / 100;
- if (keep >= (size_t)_free_list.count()) {
- return;
- }
+CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
+ return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+}
+
+void G1CodeRootSet::allocate_small_table() {
+ _table = new CodeRootSetTable(SmallSize);
+}
- FreeList<G1CodeRootChunk> temp;
- temp.initialize();
- temp.set_size(G1CodeRootChunk::word_size());
+void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
+ for (;;) {
+ table->_purge_next = _purge_list;
+ CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+ if (old == table->_purge_next) {
+ break;
+ }
+ }
+}
- _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
-
- G1CodeRootChunk* cur = temp.get_chunk_at_head();
- while (cur != NULL) {
- delete cur;
- cur = temp.get_chunk_at_head();
+void CodeRootSetTable::purge() {
+ CodeRootSetTable* table = _purge_list;
+ _purge_list = NULL;
+ while (table != NULL) {
+ CodeRootSetTable* to_purge = table;
+ table = table->_purge_next;
+ delete to_purge;
}
}
-size_t G1CodeRootChunkManager::static_mem_size() {
- return sizeof(G1CodeRootChunkManager);
+void G1CodeRootSet::move_to_large() {
+ CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
+
+ _table->copy_to(temp);
+
+ CodeRootSetTable::purge_list_append(_table);
+
+ OrderAccess::release_store_ptr(&_table, temp);
}
-G1CodeRootChunk* G1CodeRootChunkManager::new_chunk() {
- G1CodeRootChunk* result = _free_list.get_chunk_at_head();
- if (result == NULL) {
- result = new G1CodeRootChunk();
+void G1CodeRootSet::purge() {
+ CodeRootSetTable::purge();
+}
+
+size_t G1CodeRootSet::static_mem_size() {
+ return CodeRootSetTable::static_mem_size();
+}
+
+void G1CodeRootSet::add(nmethod* method) {
+ bool added = false;
+ if (is_empty()) {
+ allocate_small_table();
+ }
+ added = _table->add(method);
+ if (_length == Threshold) {
+ move_to_large();
+ }
+ if (added) {
+ ++_length;
+ }
+}
+
+bool G1CodeRootSet::remove(nmethod* method) {
+ bool removed = false;
+ if (_table != NULL) {
+ removed = _table->remove(method);
+ }
+ if (removed) {
+ _length--;
+ if (_length == 0) {
+ clear();
+ }
+ }
+ return removed;
+}
+
+bool G1CodeRootSet::contains(nmethod* method) {
+ CodeRootSetTable* table = load_acquire_table();
+ if (table != NULL) {
+ return table->contains(method);
}
- _num_chunks_handed_out++;
- result->reset();
- return result;
+ return false;
+}
+
+void G1CodeRootSet::clear() {
+ delete _table;
+ _table = NULL;
+ _length = 0;
+}
+
+size_t G1CodeRootSet::mem_size() {
+ return sizeof(*this) +
+ (_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
+}
+
+void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
+ if (_table != NULL) {
+ _table->nmethods_do(blk);
+ }
+}
+
+class CleanCallback : public StackObj {
+ class PointsIntoHRDetectionClosure : public OopClosure {
+ HeapRegion* _hr;
+ public:
+ bool _points_into;
+ PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
+
+ void do_oop(narrowOop* o) {
+ do_oop_work(o);
+ }
+
+ void do_oop(oop* o) {
+ do_oop_work(o);
+ }
+
+ template <typename T>
+ void do_oop_work(T* p) {
+ if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+ _points_into = true;
+ }
+ }
+ };
+
+ PointsIntoHRDetectionClosure _detector;
+ CodeBlobToOopClosure _blobs;
+
+ public:
+ CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
+
+ bool operator() (nmethod* nm) {
+ _detector._points_into = false;
+ _blobs.do_code_blob(nm);
+ return _detector._points_into;
+ }
+};
+
+void G1CodeRootSet::clean(HeapRegion* owner) {
+ CleanCallback should_clean(owner);
+ if (_table != NULL) {
+ _table->remove_if(should_clean);
+ }
}
#ifndef PRODUCT
-size_t G1CodeRootChunkManager::num_chunks_handed_out() const {
- return _num_chunks_handed_out;
-}
+class G1CodeRootSetTest {
+ public:
+ static void test() {
+ {
+ G1CodeRootSet set1;
+ assert(set1.is_empty(), "Code root set must be initially empty but is not.");
+
+ assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
+ err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
+
+ set1.add((nmethod*)1);
+ assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
+ SIZE_FORMAT" elements", set1.length()));
+
+ const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
+
+ for (size_t i = 1; i <= num_to_add; i++) {
+ set1.add((nmethod*)1);
+ }
+ assert(set1.length() == 1,
+ err_msg("Duplicate detection should not have increased the set size but "
+ "is "SIZE_FORMAT, set1.length()));
-size_t G1CodeRootChunkManager::num_free_chunks() const {
- return (size_t)_free_list.count();
+ for (size_t i = 2; i <= num_to_add; i++) {
+ set1.add((nmethod*)(uintptr_t)(i));
+ }
+ assert(set1.length() == num_to_add,
+ err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
+ "need to be in the set, but there are only "SIZE_FORMAT,
+ num_to_add, set1.length()));
+
+ assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
+
+ size_t num_popped = 0;
+ for (size_t i = 1; i <= num_to_add; i++) {
+ bool removed = set1.remove((nmethod*)i);
+ if (removed) {
+ num_popped += 1;
+ } else {
+ break;
+ }
+ }
+ assert(num_popped == num_to_add,
+ err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
+ "were added", num_popped, num_to_add));
+ assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
+
+ G1CodeRootSet::purge();
+
+ assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
+
+ }
+
+ }
+};
+
+void TestCodeCacheRemSet_test() {
+ G1CodeRootSetTest::test();
}
#endif
-
-G1CodeRootChunkManager G1CodeRootSet::_default_chunk_manager;
-
-void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
- _default_chunk_manager.purge_chunks(keep_ratio);
-}
-
-size_t G1CodeRootSet::free_chunks_static_mem_size() {
- return _default_chunk_manager.static_mem_size();
-}
-
-size_t G1CodeRootSet::free_chunks_mem_size() {
- return _default_chunk_manager.fl_mem_size();
-}
-
-G1CodeRootSet::G1CodeRootSet(G1CodeRootChunkManager* manager) : _manager(manager), _list(), _length(0) {
- if (_manager == NULL) {
- _manager = &_default_chunk_manager;
- }
- _list.initialize();
- _list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::~G1CodeRootSet() {
- clear();
-}
-
-void G1CodeRootSet::add(nmethod* method) {
- if (!contains(method)) {
- // Find the first chunk that isn't full.
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- if (!cur->is_full()) {
- break;
- }
- cur = cur->next();
- }
-
- // All chunks are full, get a new chunk.
- if (cur == NULL) {
- cur = new_chunk();
- _list.return_chunk_at_head(cur);
- }
-
- // Add the nmethod.
- bool result = cur->add(method);
-
- guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
-
- _length++;
- }
-}
-
-void G1CodeRootSet::remove_lock_free(nmethod* method) {
- G1CodeRootChunk* found = find(method);
- if (found != NULL) {
- bool result = found->remove_lock_free(method);
- if (result) {
- Atomic::dec_ptr((volatile intptr_t*)&_length);
- }
- }
- assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
-}
-
-nmethod* G1CodeRootSet::pop() {
- while (true) {
- G1CodeRootChunk* cur = _list.head();
- if (cur == NULL) {
- assert(_length == 0, "when there are no chunks, there should be no elements");
- return NULL;
- }
- nmethod* result = cur->pop();
- if (result != NULL) {
- _length--;
- return result;
- } else {
- free(_list.get_chunk_at_head());
- }
- }
-}
-
-G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- if (cur->contains(method)) {
- return cur;
- }
- cur = (G1CodeRootChunk*)cur->next();
- }
- return NULL;
-}
-
-void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
- free_chunk(chunk);
-}
-
-bool G1CodeRootSet::contains(nmethod* method) {
- return find(method) != NULL;
-}
-
-void G1CodeRootSet::clear() {
- free_all_chunks(&_list);
- _length = 0;
-}
-
-void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- cur->nmethods_do(blk);
- cur = (G1CodeRootChunk*)cur->next();
- }
-}
-
-size_t G1CodeRootSet::static_mem_size() {
- return sizeof(G1CodeRootSet);
-}
-
-size_t G1CodeRootSet::mem_size() {
- return G1CodeRootSet::static_mem_size() + _list.count() * _list.size();
-}
-
-#ifndef PRODUCT
-
-void G1CodeRootSet::test() {
- G1CodeRootChunkManager mgr;
-
- assert(mgr.num_chunks_handed_out() == 0, "Must not have handed out chunks yet");
-
- assert(G1CodeRootChunkManager::static_mem_size() > sizeof(void*),
- err_msg("The chunk manager's static memory usage seems too small, is only "SIZE_FORMAT" bytes.", G1CodeRootChunkManager::static_mem_size()));
-
- // The number of chunks that we allocate for purge testing.
- size_t const num_chunks = 10;
-
- {
- G1CodeRootSet set1(&mgr);
- assert(set1.is_empty(), "Code root set must be initially empty but is not.");
-
- assert(G1CodeRootSet::static_mem_size() > sizeof(void*),
- err_msg("The code root set's static memory usage seems too small, is only "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
-
- set1.add((nmethod*)1);
- assert(mgr.num_chunks_handed_out() == 1,
- err_msg("Must have allocated and handed out one chunk, but handed out "
- SIZE_FORMAT" chunks", mgr.num_chunks_handed_out()));
- assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
- SIZE_FORMAT" elements", set1.length()));
-
- // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
- // we cannot access.
- for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
- set1.add((nmethod*)1);
- }
- assert(mgr.num_chunks_handed_out() == 1,
- err_msg("Duplicate detection must have prevented allocation of further "
- "chunks but allocated "SIZE_FORMAT, mgr.num_chunks_handed_out()));
- assert(set1.length() == 1,
- err_msg("Duplicate detection should not have increased the set size but "
- "is "SIZE_FORMAT, set1.length()));
-
- size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
- for (size_t i = 0; i < num_total_after_add - 1; i++) {
- set1.add((nmethod*)(uintptr_t)(2 + i));
- }
- assert(mgr.num_chunks_handed_out() > 1,
- "After adding more code roots, more than one additional chunk should have been handed out");
- assert(set1.length() == num_total_after_add,
- err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
- "need to be in the set, but there are only "SIZE_FORMAT,
- num_total_after_add, set1.length()));
-
- size_t num_popped = 0;
- while (set1.pop() != NULL) {
- num_popped++;
- }
- assert(num_popped == num_total_after_add,
- err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
- "were added", num_popped, num_total_after_add));
- assert(mgr.num_chunks_handed_out() == 0,
- err_msg("After popping all elements, all chunks must have been returned "
- "but there are still "SIZE_FORMAT" additional", mgr.num_chunks_handed_out()));
-
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0,
- err_msg("After purging everything, the free list must be empty but still "
- "contains "SIZE_FORMAT" chunks", mgr.num_free_chunks()));
-
- // Add some more handed out chunks.
- size_t i = 0;
- while (mgr.num_chunks_handed_out() < num_chunks) {
- set1.add((nmethod*)i);
- i++;
- }
-
- {
- // Generate chunks on the free list.
- G1CodeRootSet set2(&mgr);
- size_t i = 0;
- while (mgr.num_chunks_handed_out() < (num_chunks * 2)) {
- set2.add((nmethod*)i);
- i++;
- }
- // Exit of the scope of the set2 object will call the destructor that generates
- // num_chunks elements on the free list.
- }
-
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Deletion of the second set must have resulted in giving back "
- "those, but there are still "SIZE_FORMAT" additional handed out, expecting "
- SIZE_FORMAT, mgr.num_chunks_handed_out(), num_chunks));
- assert(mgr.num_free_chunks() == num_chunks,
- err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
- "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
- size_t const test_percentage = 50;
- mgr.purge_chunks(test_percentage);
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
- mgr.num_chunks_handed_out()));
- assert(mgr.num_free_chunks() == (size_t)(mgr.num_chunks_handed_out() * test_percentage / 100),
- err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
- "but there are "SIZE_FORMAT, test_percentage, num_chunks,
- mgr.num_free_chunks()));
- // Purge the remainder of the chunks on the free list.
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0, "Free List must be empty");
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
- "but there are "SIZE_FORMAT, num_chunks, mgr.num_chunks_handed_out()));
-
- // Exit of the scope of the set1 object will call the destructor that generates
- // num_chunks additional elements on the free list.
- }
-
- assert(mgr.num_chunks_handed_out() == 0,
- err_msg("Deletion of the only set must have resulted in no chunks handed "
- "out, but there is still "SIZE_FORMAT" handed out", mgr.num_chunks_handed_out()));
- assert(mgr.num_free_chunks() == num_chunks,
- err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
- "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
- // Restore initial state.
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0, "Free List must be empty");
- assert(mgr.num_chunks_handed_out() == 0, "No additional elements must have been handed out yet");
-}
-
-void TestCodeCacheRemSet_test() {
- G1CodeRootSet::test();
-}
-#endif
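// The purge_list_append()/purge() pair above implements a lock-free
// "prepend now, reap later" list: a thread that grows its table parks the old
// one on a global list with a CAS loop, and a later single-threaded phase
// deletes everything at once. A minimal sketch using std::atomic in place of
// Atomic::cmpxchg_ptr:
#include <atomic>

struct Table {
  Table* _purge_next = nullptr;
};

std::atomic<Table*> purge_list{nullptr};

void purge_list_append(Table* t) {
  Table* head = purge_list.load();
  do {
    t->_purge_next = head;                    // link before publishing
  } while (!purge_list.compare_exchange_weak(head, t));
}

void purge() {                                // must not race with appends
  Table* t = purge_list.exchange(nullptr);
  while (t != nullptr) {
    Table* next = t->_purge_next;
    delete t;
    t = next;
  }
}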
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -26,222 +26,64 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#include "memory/allocation.hpp"
-#include "memory/freeList.hpp"
-#include "runtime/globals.hpp"
class CodeBlobClosure;
-
-// The elements of the G1CodeRootChunk is either:
-// 1) nmethod pointers
-// 2) nodes in an internally chained free list
-typedef union {
- nmethod* _nmethod;
- void* _link;
-} NmethodOrLink;
-
-class G1CodeRootChunk : public CHeapObj<mtGC> {
- private:
- static const int NUM_ENTRIES = 32;
- public:
- G1CodeRootChunk* _next;
- G1CodeRootChunk* _prev;
-
- NmethodOrLink* _top;
- // First free position within the chunk.
- volatile NmethodOrLink* _free;
-
- NmethodOrLink _data[NUM_ENTRIES];
-
- NmethodOrLink* bottom() const {
- return (NmethodOrLink*) &(_data[0]);
- }
-
- NmethodOrLink* end() const {
- return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
- }
-
- bool is_link(NmethodOrLink* nmethod_or_link) {
- return nmethod_or_link->_link == NULL ||
- (bottom() <= nmethod_or_link->_link
- && nmethod_or_link->_link < end());
- }
-
- bool is_nmethod(NmethodOrLink* nmethod_or_link) {
- return !is_link(nmethod_or_link);
- }
-
- public:
- G1CodeRootChunk();
- ~G1CodeRootChunk() {}
-
- static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
-
- // FreeList "interface" methods
-
- G1CodeRootChunk* next() const { return _next; }
- G1CodeRootChunk* prev() const { return _prev; }
- void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");}
- void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");}
- void clear_next() { set_next(NULL); }
- void clear_prev() { set_prev(NULL); }
-
- size_t size() const { return word_size(); }
-
- void link_next(G1CodeRootChunk* ptr) { set_next(ptr); }
- void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); }
- void link_after(G1CodeRootChunk* ptr) {
- link_next(ptr);
- if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
- }
-
- bool is_free() { return true; }
-
- // New G1CodeRootChunk routines
-
- void reset();
-
- bool is_empty() const {
- return _top == bottom();
- }
-
- bool is_full() const {
- return _top == end() && _free == NULL;
- }
-
- bool contains(nmethod* method) {
- NmethodOrLink* cur = bottom();
- while (cur != _top) {
- if (cur->_nmethod == method) return true;
- cur++;
- }
- return false;
- }
-
- bool add(nmethod* method) {
- if (is_full()) {
- return false;
- }
-
- if (_free != NULL) {
- // Take from internally chained free list
- NmethodOrLink* first_free = (NmethodOrLink*)_free;
- _free = (NmethodOrLink*)_free->_link;
- first_free->_nmethod = method;
- } else {
- // Take from top.
- _top->_nmethod = method;
- _top++;
- }
-
- return true;
- }
-
- bool remove_lock_free(nmethod* method);
-
- void nmethods_do(CodeBlobClosure* blk);
-
- nmethod* pop() {
- if (_free != NULL) {
- // Kill the free list.
- _free = NULL;
- }
-
- while (!is_empty()) {
- _top--;
- if (is_nmethod(_top)) {
- return _top->_nmethod;
- }
- }
-
- return NULL;
- }
-};
-
-// Manages free chunks.
-class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC {
- private:
- // Global free chunk list management
- FreeList<G1CodeRootChunk> _free_list;
- // Total number of chunks handed out
- size_t _num_chunks_handed_out;
-
- public:
- G1CodeRootChunkManager();
-
- G1CodeRootChunk* new_chunk();
- void free_chunk(G1CodeRootChunk* chunk);
- // Free all elements of the given list.
- void free_all_chunks(FreeList<G1CodeRootChunk>* list);
-
- void initialize();
- void purge_chunks(size_t keep_ratio);
-
- static size_t static_mem_size();
- size_t fl_mem_size();
-
-#ifndef PRODUCT
- size_t num_chunks_handed_out() const;
- size_t num_free_chunks() const;
-#endif
-};
+class CodeRootSetTable;
+class HeapRegion;
+class nmethod;
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
+ friend class G1CodeRootSetTest;
private:
- // Global default free chunk manager instance.
- static G1CodeRootChunkManager _default_chunk_manager;
- G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); }
- void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); }
- // Free all elements of the given list.
- void free_all_chunks(FreeList<G1CodeRootChunk>* list) { _manager->free_all_chunks(list); }
+ const static size_t SmallSize = 32;
+ const static size_t Threshold = 24;
+ const static size_t LargeSize = 512;
- // Return the chunk that contains the given nmethod, NULL otherwise.
- // Scans the list of chunks backwards, as this method is used to add new
- // entries, which are typically added in bulk for a single nmethod.
- G1CodeRootChunk* find(nmethod* method);
- void free(G1CodeRootChunk* chunk);
+ CodeRootSetTable* _table;
+ CodeRootSetTable* load_acquire_table();
size_t _length;
- FreeList<G1CodeRootChunk> _list;
- G1CodeRootChunkManager* _manager;
+
+ void move_to_large();
+ void allocate_small_table();
public:
- // If an instance is initialized with a chunk manager of NULL, use the global
- // default one.
- G1CodeRootSet(G1CodeRootChunkManager* manager = NULL);
+ G1CodeRootSet() : _table(NULL), _length(0) {}
~G1CodeRootSet();
- static void purge_chunks(size_t keep_ratio);
+ static void purge();
- static size_t free_chunks_static_mem_size();
- static size_t free_chunks_mem_size();
+ static size_t static_mem_size();
- // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
- // method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
- void remove_lock_free(nmethod* method);
- nmethod* pop();
+ bool remove(nmethod* method);
+ // Safe to call without synchronization, but may return false negatives.
bool contains(nmethod* method);
void clear();
void nmethods_do(CodeBlobClosure* blk) const;
- bool is_empty() { return length() == 0; }
+ // Remove all nmethods which no longer contain pointers into our "owner" region
+ void clean(HeapRegion* owner);
+
+ bool is_empty() {
+ bool empty = length() == 0;
+ assert(empty == (_table == NULL), "is empty only if table is deallocated");
+ return empty;
+ }
// Length in elements
size_t length() const { return _length; }
- // Static data memory size in bytes of this set.
- static size_t static_mem_size();
// Memory size in bytes taken by this set.
size_t mem_size();
- static void test() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
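// The header above encodes the new growth policy: a set starts with a small
// table (SmallSize buckets), grows once to LargeSize when the element count
// reaches Threshold, and publishes the new table with a release store so that
// unsynchronized readers (contains()) see either the old or the new table,
// never a half-copied one. A sketch of that publication with illustrative
// names; note the real add() sequences the length bookkeeping slightly
// differently:
#include <atomic>
#include <cstddef>

struct HashTable { /* buckets ... */ };

struct CodeRootSet {
  static const size_t Threshold = 24;         // from the header above
  std::atomic<HashTable*> _table{nullptr};
  size_t _length = 0;

  void add_under_lock(/* nmethod* nm */) {
    if (_table.load(std::memory_order_relaxed) == nullptr) {
      _table.store(new HashTable(/* SmallSize */), std::memory_order_release);
    }
    // ... insert into the current table ...
    if (++_length == Threshold) {
      HashTable* larger = new HashTable(/* LargeSize */);
      // ... copy all entries into 'larger' ...
      // the old table goes on the purge list; readers may still be walking it
      _table.store(larger, std::memory_order_release);  // release_store_ptr
    }
  }

  bool contains(/* nmethod* nm */) {
    HashTable* t = _table.load(std::memory_order_acquire); // load_acquire
    return t != nullptr /* && t->contains(nm) */;
  }
};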
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -532,9 +532,9 @@
// again to allocate from it.
append_secondary_free_list();
- assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
+ assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
"empty we should have moved at least one entry to the free_list");
- HeapRegion* res = _hrs.allocate_free_region(is_old);
+ HeapRegion* res = _hrm.allocate_free_region(is_old);
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"allocated "HR_FORMAT" from secondary_free_list",
@@ -575,7 +575,7 @@
}
}
- res = _hrs.allocate_free_region(is_old);
+ res = _hrm.allocate_free_region(is_old);
if (res == NULL) {
if (G1ConcRegionFreeingVerbose) {
@@ -601,7 +601,7 @@
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty.
// In either case allocate_free_region() will check for NULL.
- res = _hrs.allocate_free_region(is_old);
+ res = _hrm.allocate_free_region(is_old);
} else {
_expand_heap_after_alloc_failure = false;
}
@@ -613,7 +613,7 @@
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
size_t word_size) {
- assert(first != G1_NO_HRS_INDEX, "pre-condition");
+ assert(first != G1_NO_HRM_INDEX, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
@@ -751,7 +751,7 @@
verify_region_sets_optional();
- uint first = G1_NO_HRS_INDEX;
+ uint first = G1_NO_HRM_INDEX;
uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
if (obj_regions == 1) {
@@ -760,7 +760,7 @@
// later.
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
if (hr != NULL) {
- first = hr->hrs_index();
+ first = hr->hrm_index();
}
} else {
// We can't allocate humongous regions spanning more than one region while
@@ -776,18 +776,18 @@
      // Policy: Try only empty regions (i.e., already committed) first. Maybe we
// are lucky enough to find some.
- first = _hrs.find_contiguous_only_empty(obj_regions);
- if (first != G1_NO_HRS_INDEX) {
- _hrs.allocate_free_regions_starting_at(first, obj_regions);
- }
- }
-
- if (first == G1_NO_HRS_INDEX) {
+ first = _hrm.find_contiguous_only_empty(obj_regions);
+ if (first != G1_NO_HRM_INDEX) {
+ _hrm.allocate_free_regions_starting_at(first, obj_regions);
+ }
+ }
+
+ if (first == G1_NO_HRM_INDEX) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, try expansion.
- first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
- if (first != G1_NO_HRS_INDEX) {
+ first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+ if (first != G1_NO_HRM_INDEX) {
// We found something. Make sure these regions are committed, i.e. expand
// the heap. Alternatively we could do a defragmentation GC.
ergo_verbose1(ErgoHeapSizing,
@@ -796,7 +796,7 @@
ergo_format_byte("allocation request"),
word_size * HeapWordSize);
- _hrs.expand_at(first, obj_regions);
+ _hrm.expand_at(first, obj_regions);
g1_policy()->record_new_heap_size(num_regions());
#ifdef ASSERT
@@ -806,14 +806,14 @@
assert(is_on_master_free_list(hr), "sanity");
}
#endif
- _hrs.allocate_free_regions_starting_at(first, obj_regions);
+ _hrm.allocate_free_regions_starting_at(first, obj_regions);
} else {
// Policy: Potentially trigger a defragmentation GC.
}
}
HeapWord* result = NULL;
- if (first != G1_NO_HRS_INDEX) {
+ if (first != G1_NO_HRM_INDEX) {
result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
@@ -1248,7 +1248,7 @@
: _hr_printer(hr_printer) { }
};
-void G1CollectedHeap::print_hrs_post_compaction() {
+void G1CollectedHeap::print_hrm_post_compaction() {
PostCompactionPrinterClosure cl(hr_printer());
heap_region_iterate(&cl);
}
@@ -1417,7 +1417,7 @@
// that all the COMMIT / UNCOMMIT events are generated before
// the end GC event.
- print_hrs_post_compaction();
+ print_hrm_post_compaction();
_hr_printer.end_gc(true /* full */, (size_t) total_collections());
}
@@ -1490,7 +1490,7 @@
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
- _hrs.verify_optional();
+ _hrm.verify_optional();
verify_region_sets_optional();
verify_after_gc();
@@ -1734,7 +1734,7 @@
ergo_format_byte("allocation request"),
word_size * HeapWordSize);
if (expand(expand_bytes)) {
- _hrs.verify_optional();
+ _hrm.verify_optional();
verify_region_sets_optional();
return attempt_allocation_at_safepoint(word_size,
false /* expect_null_mutator_alloc_region */);
@@ -1762,7 +1762,7 @@
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");
- uint expanded_by = _hrs.expand_by(regions_to_expand);
+ uint expanded_by = _hrm.expand_by(regions_to_expand);
if (expanded_by > 0) {
size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
@@ -1775,7 +1775,7 @@
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if (G1ExitOnExpansionFailure &&
- _hrs.available() >= regions_to_expand) {
+ _hrm.available() >= regions_to_expand) {
// We had head room...
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
}
@@ -1790,7 +1790,7 @@
HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
- uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
+ uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
ergo_verbose3(ErgoHeapSizing,
@@ -1823,7 +1823,7 @@
shrink_helper(shrink_bytes);
rebuild_region_sets(true /* free_list_only */);
- _hrs.verify_optional();
+ _hrm.verify_optional();
verify_region_sets_optional();
}
@@ -1867,6 +1867,7 @@
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_concurrent_cycle_started(false),
+ _heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
@@ -2032,7 +2033,7 @@
CMBitMap::mark_distance(),
mtGC);
- _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+ _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_cg1r->init(card_counts_storage);
@@ -2053,8 +2054,8 @@
_g1h = this;
- _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
- _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
+ _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+ _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
@@ -2115,7 +2116,7 @@
// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
- HeapRegion* dummy_region = _hrs.get_dummy_region();
+ HeapRegion* dummy_region = _hrm.get_dummy_region();
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
@@ -2232,14 +2233,14 @@
}
size_t G1CollectedHeap::capacity() const {
- return _hrs.length() * HeapRegion::GrainBytes;
+ return _hrm.length() * HeapRegion::GrainBytes;
}
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "pre-condition");
hr->reset_gc_time_stamp();
if (hr->startsHumongous()) {
- uint first_index = hr->hrs_index() + 1;
+ uint first_index = hr->hrm_index() + 1;
uint last_index = hr->last_hc_index();
for (uint i = first_index; i < last_index; i += 1) {
HeapRegion* chr = region_at(i);
@@ -2445,13 +2446,24 @@
_gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+ // Clear state variables to prepare for the next concurrent cycle.
_concurrent_cycle_started = false;
+ _heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (_concurrent_cycle_started) {
- trace_heap_after_gc(_gc_tracer_cm);
+    // This function can be called when:
+    //  * the cleanup pause is run,
+    //  * the concurrent cycle is aborted before the cleanup pause, or
+    //  * the concurrent cycle is aborted after the cleanup pause,
+    //    but before the concurrent cycle end has been registered.
+    // Make sure that we only send the heap information once.
+ if (!_heap_summary_sent) {
+ trace_heap_after_gc(_gc_tracer_cm);
+ _heap_summary_sent = true;
+ }
}
}
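// A minimal sketch of the send-once guard introduced above: several paths
// (the cleanup pause, an abort before it, an abort after it) can reach the
// reporting point, and the flag ensures exactly one summary per cycle. The
// names below are illustrative; the real code runs under VM-internal
// synchronization, so no atomics are shown.
struct CycleSummaryGuard {
  bool _started;
  bool _summary_sent;

  void report_once() {
    if (_started && !_summary_sent) {
      // emit the heap summary here, exactly once per concurrent cycle
      _summary_sent = true;
    }
  }
  void cycle_end() {      // reset both flags for the next cycle
    _started = false;
    _summary_sent = false;
  }
};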
@@ -2537,7 +2549,7 @@
}
bool G1CollectedHeap::is_in(const void* p) const {
- if (_hrs.reserved().contains(p)) {
+ if (_hrm.reserved().contains(p)) {
// Given that we know that p is in the reserved space,
// heap_region_containing_raw() should successfully
// return the containing region.
@@ -2551,7 +2563,7 @@
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
bool contains = reserved_region().contains(p);
- bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+ bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
if (contains && available) {
return true;
} else {
@@ -2618,7 +2630,7 @@
}
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
- _hrs.iterate(cl);
+ _hrm.iterate(cl);
}
void
@@ -2626,7 +2638,7 @@
uint worker_id,
uint num_workers,
jint claim_value) const {
- _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
+ _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
}
class ResetClaimValuesClosure: public HeapRegionClosure {
@@ -2846,9 +2858,9 @@
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
- HeapRegion* result = _hrs.next_region_in_heap(from);
+ HeapRegion* result = _hrm.next_region_in_heap(from);
while (result != NULL && result->isHumongous()) {
- result = _hrs.next_region_in_heap(result);
+ result = _hrm.next_region_in_heap(result);
}
return result;
}
@@ -2908,7 +2920,7 @@
}
size_t G1CollectedHeap::max_capacity() const {
- return _hrs.reserved().byte_size();
+ return _hrm.reserved().byte_size();
}
jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3437,9 +3449,9 @@
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity()/K, used_unlocked()/K);
st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
- _hrs.reserved().start(),
- _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
- _hrs.reserved().end());
+ _hrm.reserved().start(),
+ _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
+ _hrm.reserved().end());
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
uint young_regions = _young_list->length();
@@ -3682,7 +3694,7 @@
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- uint region_idx = r->hrs_index();
+ uint region_idx = r->hrm_index();
bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
// Is_candidate already filters out humongous regions with some remembered set.
// This will not lead to humongous object that we mistakenly keep alive because
@@ -4205,7 +4217,7 @@
// output from the concurrent mark thread interfering with this
// logging output either.
- _hrs.verify_optional();
+ _hrm.verify_optional();
verify_region_sets_optional();
TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@@ -4658,6 +4670,56 @@
}
};
+class G1CodeBlobClosure : public CodeBlobClosure {
+ class HeapRegionGatheringOopClosure : public OopClosure {
+ G1CollectedHeap* _g1h;
+ OopClosure* _work;
+ nmethod* _nm;
+
+ template <typename T>
+ void do_oop_work(T* p) {
+ _work->do_oop(p);
+ T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(oop_or_narrowoop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+ HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+ assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
+ hr->add_strong_code_root(_nm);
+ }
+ }
+
+ public:
+ HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+ void do_oop(oop* o) {
+ do_oop_work(o);
+ }
+
+ void do_oop(narrowOop* o) {
+ do_oop_work(o);
+ }
+
+ void set_nm(nmethod* nm) {
+ _nm = nm;
+ }
+ };
+
+ HeapRegionGatheringOopClosure _oc;
+public:
+ G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ if (!nm->test_set_oops_do_mark()) {
+ _oc.set_nm(nm);
+ nm->oops_do(&_oc);
+ nm->fix_oop_relocations();
+ }
+ }
+ }
+};
+
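+// A standalone model of the gathering closure above (illustrative names and
+// types, not HotSpot APIs): each visited slot is first handed to the wrapped
+// worker closure, then the region containing the referenced object records
+// the visiting nmethod as a strong code root. Assumes a flat array of
+// fixed-size regions and that every visited object lies inside it.
+#include <cstddef>
+#include <cstdint>
+#include <set>
+#include <vector>
+
+struct ToyRegion { std::set<const void*> strong_code_roots; };
+
+struct ToyHeap {
+  uintptr_t base;          // bottom of the reserved space
+  size_t region_bytes;     // fixed region size
+  std::vector<ToyRegion> regions;
+  ToyRegion* region_containing(const void* obj) {
+    return &regions[((uintptr_t)obj - base) / region_bytes];
+  }
+};
+
+struct GatheringVisitor {
+  ToyHeap* heap;
+  void (*work)(void** slot);  // the wrapped "real" oop closure
+  const void* current_nm;     // set once per nmethod, like set_nm()
+
+  void visit(void** slot) {
+    work(slot);               // forward first, like _work->do_oop(p)
+    if (*slot != NULL) {      // then gather the code root
+      heap->region_containing(*slot)->strong_code_roots.insert(current_nm);
+    }
+  }
+};
+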
class G1ParTask : public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@@ -4726,22 +4788,6 @@
}
};
- class G1CodeBlobClosure: public CodeBlobClosure {
- OopClosure* _f;
-
- public:
- G1CodeBlobClosure(OopClosure* f) : _f(f) {}
- void do_code_blob(CodeBlob* blob) {
- nmethod* that = blob->as_nmethod_or_null();
- if (that != NULL) {
- if (!that->test_set_oops_do_mark()) {
- that->oops_do(_f);
- that->fix_oop_relocations();
- }
- }
- }
- };
-
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
@@ -4932,7 +4978,7 @@
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
- MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
+ G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
@@ -5979,12 +6025,6 @@
hot_card_cache->reset_hot_cache();
hot_card_cache->set_use_cache(true);
- // Migrate the strong code roots attached to each region in
- // the collection set. Ideally we would like to do this
- // after we have finished the scanning/evacuation of the
- // strong code roots for a particular heap region.
- migrate_strong_code_roots();
-
purge_code_root_memory();
if (g1_policy()->during_initial_mark_pause()) {
@@ -6024,7 +6064,7 @@
bool locked) {
assert(!hr->isHumongous(), "this is only for non-humongous regions");
assert(!hr->is_empty(), "the region should not be empty");
- assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
+ assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
assert(free_list != NULL, "pre-condition");
if (G1VerifyBitmaps) {
@@ -6055,7 +6095,7 @@
hr->set_notHumongous();
free_region(hr, free_list, par);
- uint i = hr->hrs_index() + 1;
+ uint i = hr->hrm_index() + 1;
while (i < last_index) {
HeapRegion* curr_hr = region_at(i);
assert(curr_hr->continuesHumongous(), "invariant");
@@ -6079,7 +6119,7 @@
assert(list != NULL, "list can't be null");
if (!list->is_empty()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
- _hrs.insert_list_into_free_list(list);
+ _hrm.insert_list_into_free_list(list);
}
}
@@ -6448,7 +6488,7 @@
// While this cleanup is not strictly necessary to be done (or done instantly),
// given that their occurrence is very low, this saves us this additional
// complexity.
- uint region_idx = r->hrs_index();
+ uint region_idx = r->hrm_index();
if (g1h->humongous_is_live(region_idx) ||
g1h->humongous_region_is_always_live(region_idx)) {
@@ -6687,22 +6727,22 @@
// this is that during a full GC string deduplication needs to know if
// a collected region was young or old when the full GC was initiated.
}
- _hrs.remove_all_free_regions();
+ _hrm.remove_all_free_regions();
}
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
HeapRegionSet* _old_set;
- HeapRegionSeq* _hrs;
+ HeapRegionManager* _hrm;
size_t _total_used;
public:
RebuildRegionSetsClosure(bool free_list_only,
- HeapRegionSet* old_set, HeapRegionSeq* hrs) :
+ HeapRegionSet* old_set, HeapRegionManager* hrm) :
_free_list_only(free_list_only),
- _old_set(old_set), _hrs(hrs), _total_used(0) {
- assert(_hrs->num_free_regions() == 0, "pre-condition");
+ _old_set(old_set), _hrm(hrm), _total_used(0) {
+ assert(_hrm->num_free_regions() == 0, "pre-condition");
if (!free_list_only) {
assert(_old_set->is_empty(), "pre-condition");
}
@@ -6715,7 +6755,7 @@
if (r->is_empty()) {
// Add free regions to the free list
- _hrs->insert_into_free_list(r);
+ _hrm->insert_into_free_list(r);
} else if (!_free_list_only) {
assert(!r->is_young(), "we should not come across young regions");
@@ -6743,7 +6783,7 @@
_young_list->empty_list();
}
- RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
+ RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
heap_region_iterate(&cl);
if (!free_list_only) {
@@ -6933,7 +6973,7 @@
private:
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
- HeapRegionSeq* _hrs;
+ HeapRegionManager* _hrm;
public:
HeapRegionSetCount _old_count;
@@ -6942,8 +6982,8 @@
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
- HeapRegionSeq* hrs) :
- _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
+ HeapRegionManager* hrm) :
+ _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
bool doHeapRegion(HeapRegion* hr) {
@@ -6954,19 +6994,19 @@
if (hr->is_young()) {
// TODO
} else if (hr->startsHumongous()) {
- assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u starts humongous but is not in the humongous set.", hr->hrm_index()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
- assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
+ assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
_free_count.increment(1u, hr->capacity());
} else {
- assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
+ assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
_old_count.increment(1u, hr->capacity());
}
return false;
}
- void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
+ void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
old_set->total_capacity_bytes(), _old_count.capacity()));
@@ -6985,7 +7025,7 @@
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
- _hrs.verify();
+ _hrm.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
@@ -7016,9 +7056,9 @@
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
- VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
+ VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
heap_region_iterate(&cl);
- cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
+ cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
}
// Optimized nmethod scanning
@@ -7037,13 +7077,8 @@
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
- // HeapRegion::add_strong_code_root() avoids adding duplicate
- // entries but having duplicates is OK since we "mark" nmethods
- // as visited when we scan the strong code root lists during the GC.
- hr->add_strong_code_root(_nm);
- assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
- err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr)));
+ // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
+ hr->add_strong_code_root_locked(_nm);
}
}
@@ -7070,9 +7105,6 @@
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
hr->remove_strong_code_root(_nm);
- assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
- err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr)));
}
}
@@ -7100,28 +7132,9 @@
   nm->oops_do(&reg_cl, true);
}
-class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
-public:
- bool doHeapRegion(HeapRegion *hr) {
- assert(!hr->isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
- HR_FORMAT_PARAMS(hr)));
- hr->migrate_strong_code_roots();
- return false;
- }
-};
-
-void G1CollectedHeap::migrate_strong_code_roots() {
- MigrateCodeRootsHeapRegionClosure cl;
- double migrate_start = os::elapsedTime();
- collection_set_iterate(&cl);
- double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
- g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
-}
-
void G1CollectedHeap::purge_code_root_memory() {
double purge_start = os::elapsedTime();
- G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
+ G1CodeRootSet::purge();
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
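// purge_code_root_memory() above follows the patch's usual phase-timing
// shape: sample a wall clock before and after the phase, convert to
// milliseconds, and record the result. A self-contained equivalent using
// std::chrono, where record_ms stands in for the phase-times bookkeeping:
#include <chrono>

template <typename Work, typename Record>
void timed_phase_ms(Work work, Record record_ms) {
  std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
  work();  // the phase being measured, e.g. a purge
  std::chrono::duration<double, std::milli> elapsed =
      std::chrono::steady_clock::now() - start;
  record_ms(elapsed.count());
}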
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -33,7 +33,7 @@
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
@@ -291,7 +291,7 @@
G1RegionMappingChangedListener _listener;
// The sequence of all heap regions in the heap.
- HeapRegionSeq _hrs;
+ HeapRegionManager _hrm;
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
@@ -415,6 +415,7 @@
volatile unsigned int _old_marking_cycles_completed;
bool _concurrent_cycle_started;
+ bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
@@ -429,7 +430,7 @@
// If the HR printer is active, dump the state of the regions in the
// heap after a compaction.
- void print_hrs_post_compaction();
+ void print_hrm_post_compaction();
double verify(bool guard, const char* msg);
void verify_before_gc();
@@ -715,7 +716,7 @@
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
- _in_cset_fast_test.set_in_cset(r->hrs_index());
+ _in_cset_fast_test.set_in_cset(r->hrm_index());
}
// This is a fast test on whether a reference points into the
@@ -1171,17 +1172,17 @@
// But G1CollectedHeap doesn't yet support this.
virtual bool is_maximal_no_gc() const {
- return _hrs.available() == 0;
+ return _hrm.available() == 0;
}
// The current number of regions in the heap.
- uint num_regions() const { return _hrs.length(); }
+ uint num_regions() const { return _hrm.length(); }
// The max number of regions in the heap.
- uint max_regions() const { return _hrs.max_length(); }
+ uint max_regions() const { return _hrm.max_length(); }
// The number of regions that are completely free.
- uint num_free_regions() const { return _hrs.num_free_regions(); }
+ uint num_free_regions() const { return _hrm.num_free_regions(); }
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
@@ -1233,7 +1234,7 @@
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
- return _hrs.is_free(hr);
+ return _hrm.is_free(hr);
}
#endif // ASSERT
@@ -1245,7 +1246,7 @@
}
void append_secondary_free_list() {
- _hrs.insert_list_into_free_list(&_secondary_free_list);
+ _hrm.insert_list_into_free_list(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty_with_lock() {
@@ -1356,13 +1357,13 @@
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
bool is_in_g1_reserved(const void* p) const {
- return _hrs.reserved().contains(p);
+ return _hrm.reserved().contains(p);
}
// Returns a MemRegion that corresponds to the space that has been
// reserved for the heap
MemRegion g1_reserved() const {
- return _hrs.reserved();
+ return _hrm.reserved();
}
virtual bool is_in_closed_subset(const void* p) const;
@@ -1661,12 +1662,6 @@
// Unregister the given nmethod from the G1 heap.
virtual void unregister_nmethod(nmethod* nm);
- // Migrate the nmethods in the code root lists of the regions
- // in the collection set to regions in to-space. In the event
- // of an evacuation failure, nmethods that reference objects
- // that were not successfully evacuated are not migrated.
- void migrate_strong_code_roots();
-
// Free up superfluous code root memory.
void purge_code_root_memory();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -30,15 +30,15 @@
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
// Inline functions for G1CollectedHeap
// Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
@@ -48,7 +48,7 @@
}
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
- return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+ return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}
template <class T>
@@ -57,7 +57,7 @@
assert(is_in_g1_reserved((const void*) addr),
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
- return _hrs.addr_to_region((HeapWord*) addr);
+ return _hrm.addr_to_region((HeapWord*) addr);
}
template <class T>
@@ -87,7 +87,7 @@
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
- HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
+ HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -217,6 +217,8 @@
_update_rset_cl->set_region(hr);
hr->object_iterate(&rspc);
+ hr->rem_set()->clean_strong_code_roots(hr);
+
hr->note_self_forwarding_removal_end(during_initial_mark,
during_conc_mark,
rspc.marked_bytes());
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -275,9 +275,6 @@
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
- // Strong code root migration time
- misc_time_ms += _cur_strong_code_root_migration_time_ms;
-
// Strong code root purge time
misc_time_ms += _cur_strong_code_root_purge_time_ms;
@@ -328,7 +325,6 @@
_last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
- print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -129,7 +129,6 @@
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
- double _cur_strong_code_root_migration_time_ms;
double _cur_strong_code_root_purge_time_ms;
double _cur_evac_fail_recalc_used;
@@ -233,10 +232,6 @@
_cur_collection_code_root_fixup_time_ms = ms;
}
- void record_strong_code_root_migration_time(double ms) {
- _cur_strong_code_root_migration_time_ms = ms;
- }
-
void record_strong_code_root_purge_time(double ms) {
_cur_strong_code_root_purge_time_ms = ms;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -32,7 +32,7 @@
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
@@ -110,7 +110,7 @@
G1CollectedHeap* _g1h;
OopsInHeapRegionClosure* _oc;
- CodeBlobToOopClosure* _code_root_cl;
+ CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
G1SATBCardTableModRefBS *_ct_bs;
@@ -122,7 +122,7 @@
public:
ScanRSClosure(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
_code_root_cl(code_root_cl),
@@ -242,7 +242,7 @@
};
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
@@ -321,7 +321,7 @@
}
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -96,7 +96,7 @@
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
@@ -108,7 +108,7 @@
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -253,6 +253,7 @@
size_t occupied_cards = hrrs->occupied();
size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
if (code_root_mem_sz > max_code_root_mem_sz()) {
+ _max_code_root_mem_sz = code_root_mem_sz;
_max_code_root_mem_sz_region = r;
}
size_t code_root_elems = hrrs->strong_code_roots_list_length();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -277,10 +277,6 @@
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
- experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \
- "The amount of code root chunks that should be kept at most " \
- "as percentage of already allocated.") \
- \
experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \
"Try to reclaim dead large objects at every young GC.") \
\
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
@@ -322,34 +322,11 @@
return false;
}
-HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
- HeapWord* low = addr;
- HeapWord* high = end();
- while (low < high) {
- size_t diff = pointer_delta(high, low);
- // Must add one below to bias toward the high amount. Otherwise, if
- // "high" were at the desired value, and "low" were one less, we
- // would not converge on "high". This is not symmetric, because
- // we set "high" to a block start, which might be the right one,
- // which we don't do for "low".
- HeapWord* middle = low + (diff+1)/2;
- if (middle == high) return high;
- HeapWord* mid_bs = block_start_careful(middle);
- if (mid_bs < addr) {
- low = middle;
- } else {
- high = mid_bs;
- }
- }
- assert(low == high && low >= addr, "Didn't work.");
- return low;
-}
-
-HeapRegion::HeapRegion(uint hrs_index,
+HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
G1OffsetTableContigSpace(sharedOffsetArray, mr),
- _hrs_index(hrs_index),
+ _hrm_index(hrm_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
@@ -563,21 +540,17 @@
hrrs->add_strong_code_root(nm);
}
+void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->add_strong_code_root_locked(nm);
+}
+
void HeapRegion::remove_strong_code_root(nmethod* nm) {
HeapRegionRemSet* hrrs = rem_set();
hrrs->remove_strong_code_root(nm);
}
-void HeapRegion::migrate_strong_code_roots() {
- assert(in_collection_set(), "only collection set regions");
- assert(!isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
- HR_FORMAT_PARAMS(this)));
-
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->migrate_strong_code_roots();
-}
-
void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
HeapRegionRemSet* hrrs = rem_set();
hrrs->strong_code_roots_do(blk);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -54,15 +54,15 @@
#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
- (_hr_)->hrs_index(), \
+ (_hr_)->hrm_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
(_hr_)->startsHumongous() ? "HS" : \
(_hr_)->continuesHumongous() ? "HC" : \
!(_hr_)->is_empty() ? "O" : "F", \
p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
-// sentinel value for hrs_index
-#define G1_NO_HRS_INDEX ((uint) -1)
+// sentinel value for hrm_index
+#define G1_NO_HRM_INDEX ((uint) -1)
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@@ -206,10 +206,6 @@
_offsets.reset_bot();
}
- void update_bot_for_object(HeapWord* start, size_t word_size) {
- _offsets.alloc_block(start, word_size);
- }
-
void print_bot_on(outputStream* out) {
_offsets.print_on(out);
}
@@ -234,7 +230,7 @@
protected:
// The index of this region in the heap region sequence.
- uint _hrs_index;
+ uint _hrm_index;
HumongousType _humongous_type;
// For a humongous region, region in which it starts.
@@ -330,7 +326,7 @@
size_t _predicted_bytes_to_copy;
public:
- HeapRegion(uint hrs_index,
+ HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);
@@ -385,9 +381,9 @@
inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
inline HeapWord* allocate_no_bot_updates(size_t word_size);
- // If this region is a member of a HeapRegionSeq, the index in that
+ // If this region is a member of a HeapRegionManager, the index in that
// sequence, otherwise -1.
- uint hrs_index() const { return _hrs_index; }
+ uint hrm_index() const { return _hrm_index; }
// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
@@ -458,7 +454,7 @@
// with this HS region.
uint last_hc_index() const {
assert(startsHumongous(), "don't call this otherwise");
- return hrs_index() + region_num();
+ return hrm_index() + region_num();
}
// Same as Space::is_in_reserved, but will use the original size of the region.
@@ -570,7 +566,7 @@
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
- HeapWord* orig_end() { return _orig_end; }
+ HeapWord* orig_end() const { return _orig_end; }
// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space, bool locked = false);
@@ -737,18 +733,6 @@
bool filter_young,
jbyte* card_ptr);
- // A version of block start that is guaranteed to find *some* block
- // boundary at or before "p", but does not object iteration, and may
- // therefore be used safely when the heap is unparseable.
- HeapWord* block_start_careful(const void* p) const {
- return _offsets.block_start_careful(p);
- }
-
- // Requires that "addr" is within the region. Returns the start of the
- // first ("careful") block that starts at or after "addr", or else the
- // "end" of the region if there is no such block.
- HeapWord* next_block_start_careful(HeapWord* addr);
-
size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
@@ -772,14 +756,9 @@
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_strong_code_root(nmethod* nm);
+ void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
- // During a collection, migrate the successfully evacuated
- // strong code roots that referenced into this region to the
- // new regions that they now point into. Unsuccessfully
- // evacuated code roots are not migrated.
- void migrate_strong_code_roots();
-
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list for this region
void strong_code_roots_do(CodeBlobClosure* blk) const;
@@ -813,7 +792,7 @@
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
- friend class HeapRegionSeq;
+ friend class HeapRegionManager;
friend class G1CollectedHeap;
bool _complete;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
+#include "memory/allocation.hpp"
+
+void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
+ G1RegionToSpaceMapper* prev_bitmap,
+ G1RegionToSpaceMapper* next_bitmap,
+ G1RegionToSpaceMapper* bot,
+ G1RegionToSpaceMapper* cardtable,
+ G1RegionToSpaceMapper* card_counts) {
+ _allocated_heapregions_length = 0;
+
+ _heap_mapper = heap_storage;
+
+ _prev_bitmap_mapper = prev_bitmap;
+ _next_bitmap_mapper = next_bitmap;
+
+ _bot_mapper = bot;
+ _cardtable_mapper = cardtable;
+
+ _card_counts_mapper = card_counts;
+
+ MemRegion reserved = heap_storage->reserved();
+ _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
+
+ _available_map.resize(_regions.length(), false);
+ _available_map.clear();
+}
+
+bool HeapRegionManager::is_available(uint region) const {
+ return _available_map.at(region);
+}
+
+#ifdef ASSERT
+bool HeapRegionManager::is_free(HeapRegion* hr) const {
+ return _free_list.contains(hr);
+}
+#endif
+
+HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
+ HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
+ MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+ assert(reserved().contains(mr), "invariant");
+ return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
+}
+
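+// The index/address mapping used by new_heap_region() above is plain
+// arithmetic over a flat reservation: region i spans
+// [bottom + i*GrainWords, bottom + (i+1)*GrainWords). A standalone sketch of
+// both directions, with an illustrative word type standing in for HeapWord:
+#include <cassert>
+#include <cstddef>
+
+typedef size_t ToyWord;      // stand-in for HotSpot's HeapWord
+
+struct RegionArithmetic {
+  ToyWord* heap_bottom;      // start of the reserved space
+  size_t grain_words;        // words per region
+
+  ToyWord* bottom_for_region(unsigned index) const {
+    return heap_bottom + (size_t)index * grain_words;
+  }
+  unsigned region_for_addr(const ToyWord* addr) const {
+    assert(addr >= heap_bottom && "address below the heap");
+    return (unsigned)((size_t)(addr - heap_bottom) / grain_words);
+  }
+};
+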
+void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
+ guarantee(num_regions > 0, "Must commit more than zero regions");
+ guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
+
+ _num_committed += (uint)num_regions;
+
+ _heap_mapper->commit_regions(index, num_regions);
+
+ // Also commit auxiliary data
+ _prev_bitmap_mapper->commit_regions(index, num_regions);
+ _next_bitmap_mapper->commit_regions(index, num_regions);
+
+ _bot_mapper->commit_regions(index, num_regions);
+ _cardtable_mapper->commit_regions(index, num_regions);
+
+ _card_counts_mapper->commit_regions(index, num_regions);
+}
+
+void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
+ guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
+ guarantee(_num_committed >= num_regions, "pre-condition");
+
+ // Print before uncommitting.
+ if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+ for (uint i = start; i < start + num_regions; i++) {
+ HeapRegion* hr = at(i);
+ G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
+ }
+ }
+
+ _num_committed -= (uint)num_regions;
+
+ _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
+ _heap_mapper->uncommit_regions(start, num_regions);
+
+ // Also uncommit auxiliary data
+ _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+ _next_bitmap_mapper->uncommit_regions(start, num_regions);
+
+ _bot_mapper->uncommit_regions(start, num_regions);
+ _cardtable_mapper->uncommit_regions(start, num_regions);
+
+ _card_counts_mapper->uncommit_regions(start, num_regions);
+}
+
+void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
+ guarantee(num_regions > 0, "No point in calling this for zero regions");
+ commit_regions(start, num_regions);
+ for (uint i = start; i < start + num_regions; i++) {
+ if (_regions.get_by_index(i) == NULL) {
+ HeapRegion* new_hr = new_heap_region(i);
+ _regions.set_by_index(i, new_hr);
+ _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
+ }
+ }
+
+ _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
+
+ for (uint i = start; i < start + num_regions; i++) {
+ assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
+ HeapRegion* hr = at(i);
+ if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+ G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+ }
+ HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
+ MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+
+ hr->initialize(mr);
+ insert_into_free_list(at(i));
+ }
+}
+
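+// make_regions_available() above creates the HeapRegion instance the first
+// time a region index is committed and retains it across later uncommits.
+// A minimal model of that create-on-first-commit / retain-on-uncommit
+// lifecycle (illustrative names; the real code also drives the auxiliary
+// data mappers and the HR printer):
+#include <cstddef>
+#include <vector>
+
+struct ToyMeta { unsigned index; };   // stands in for a HeapRegion
+
+struct ToyRegionTable {
+  std::vector<ToyMeta*> regions;      // NULL until first commit, then kept
+  std::vector<bool> available;
+
+  explicit ToyRegionTable(unsigned max)
+    : regions(max, NULL), available(max, false) {}
+
+  void make_available(unsigned i) {
+    if (regions[i] == NULL) {
+      regions[i] = new ToyMeta();     // allocated once, reused on recommit
+      regions[i]->index = i;
+    }
+    available[i] = true;
+  }
+
+  void uncommit(unsigned i) {
+    available[i] = false;             // metadata object deliberately kept
+  }
+};
+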
+uint HeapRegionManager::expand_by(uint num_regions) {
+ return expand_at(0, num_regions);
+}
+
+uint HeapRegionManager::expand_at(uint start, uint num_regions) {
+ if (num_regions == 0) {
+ return 0;
+ }
+
+ uint cur = start;
+ uint idx_last_found = 0;
+ uint num_last_found = 0;
+
+ uint expanded = 0;
+
+ while (expanded < num_regions &&
+ (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
+ uint to_expand = MIN2(num_regions - expanded, num_last_found);
+ make_regions_available(idx_last_found, to_expand);
+ expanded += to_expand;
+ cur = idx_last_found + num_last_found + 1;
+ }
+
+ verify_optional();
+ return expanded;
+}
+
+uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
+ uint found = 0;
+ size_t length_found = 0;
+ uint cur = 0;
+
+ while (length_found < num && cur < max_length()) {
+ HeapRegion* hr = _regions.get_by_index(cur);
+ if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+ // This region is a potential candidate for allocation into.
+ length_found++;
+ } else {
+ // This region is not a candidate. The next region is the next possible one.
+ found = cur + 1;
+ length_found = 0;
+ }
+ cur++;
+ }
+
+ if (length_found == num) {
+ for (uint i = found; i < (found + num); i++) {
+ HeapRegion* hr = _regions.get_by_index(i);
+ // sanity check
+ guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+ err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+ " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
+ }
+ return found;
+ } else {
+ return G1_NO_HRM_INDEX;
+ }
+}
+
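+// find_contiguous() above is a single-pass first-fit scan: extend a run of
+// candidate regions, and restart the run just past the first non-candidate.
+// A standalone version over an arbitrary predicate; UINT_MAX plays the role
+// of G1_NO_HRM_INDEX here:
+#include <climits>
+#include <cstddef>
+
+template <typename Candidate>
+unsigned find_contiguous_run(unsigned max_len, size_t num, Candidate candidate) {
+  unsigned found = 0;
+  size_t run = 0;
+  for (unsigned cur = 0; cur < max_len && run < num; cur++) {
+    if (candidate(cur)) {
+      run++;               // extend the current run
+    } else {
+      found = cur + 1;     // run broken; the next run starts after cur
+      run = 0;
+    }
+  }
+  return (run == num) ? found : UINT_MAX;
+}
+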
+HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
+ guarantee(r != NULL, "Start region must be a valid region");
+ guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
+ for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
+ HeapRegion* hr = _regions.get_by_index(i);
+ if (is_available(i)) {
+ return hr;
+ }
+ }
+ return NULL;
+}
+
+void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
+ uint len = max_length();
+
+ for (uint i = 0; i < len; i++) {
+ if (!is_available(i)) {
+ continue;
+ }
+ guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
+ bool res = blk->doHeapRegion(at(i));
+ if (res) {
+ blk->incomplete();
+ return;
+ }
+ }
+}
+
+uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
+ guarantee(res_idx != NULL, "checking");
+ guarantee(start_idx <= (max_length() + 1), "checking");
+
+ uint num_regions = 0;
+
+ uint cur = start_idx;
+ while (cur < max_length() && is_available(cur)) {
+ cur++;
+ }
+ if (cur == max_length()) {
+ return num_regions;
+ }
+ *res_idx = cur;
+ while (cur < max_length() && !is_available(cur)) {
+ cur++;
+ }
+ num_regions = cur - *res_idx;
+#ifdef ASSERT
+ for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+ assert(!is_available(i), "just checking");
+ }
+ assert(cur == max_length() || num_regions == 0 || is_available(cur),
+ err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
+#endif
+ return num_regions;
+}
+
+uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
+ return num_regions * worker_i / num_workers;
+}
+
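+// start_region_for_worker() above stripes the index space evenly: worker i
+// of n starts at num_regions * i / n, so the stripes [start(i), start(i+1))
+// partition all regions without overlap. For example, 10 regions over 4
+// workers start at indices 0, 2, 5 and 7 (integer division):
+#include <cassert>
+
+inline unsigned stripe_start(unsigned worker, unsigned num_workers, unsigned num_regions) {
+  return num_regions * worker / num_workers;
+}
+
+inline void check_striping_example() {
+  assert(stripe_start(0, 4, 10) == 0);
+  assert(stripe_start(1, 4, 10) == 2);
+  assert(stripe_start(2, 4, 10) == 5);
+  assert(stripe_start(3, 4, 10) == 7);
+  assert(stripe_start(4, 4, 10) == 10);  // one past the last stripe
+}
+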
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
+ const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+ // Every worker will actually look at all regions, skipping over regions that
+ // are currently not committed.
+ // This also (potentially) iterates over regions newly allocated during GC. This
+ // is no problem except for some extra work.
+ for (uint count = 0; count < _allocated_heapregions_length; count++) {
+ const uint index = (start_index + count) % _allocated_heapregions_length;
+ assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+ // Skip over unavailable regions
+ if (!is_available(index)) {
+ continue;
+ }
+ HeapRegion* r = _regions.get_by_index(index);
+ // We'll ignore "continues humongous" regions (we'll process them
+ // when we come across their corresponding "start humongous"
+ // region) and regions already claimed.
+ if (r->claim_value() == claim_value || r->continuesHumongous()) {
+ continue;
+ }
+ // OK, try to claim it
+ if (!r->claimHeapRegion(claim_value)) {
+ continue;
+ }
+ // Success!
+ if (r->startsHumongous()) {
+ // If the region is "starts humongous" we'll iterate over its
+ // "continues humongous" first; in fact we'll do them
+ // first. The order is important. In one case, calling the
+ // closure on the "starts humongous" region might de-allocate
+ // and clear all its "continues humongous" regions and, as a
+ // result, we might end up processing them twice. So, we'll do
+ // them first (note: most closures will ignore them anyway) and
+ // then we'll do the "starts humongous" region.
+ for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+ HeapRegion* chr = _regions.get_by_index(ch_index);
+
+ assert(chr->continuesHumongous(), "Must be humongous region");
+ assert(chr->humongous_start_region() == r,
+ err_msg("Must work on humongous continuation of the original start region "
+ PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+ assert(chr->claim_value() != claim_value,
+ "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+ bool claim_result = chr->claimHeapRegion(claim_value);
+ // We should always be able to claim it; no one else should
+ // be trying to claim this region.
+ guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+ bool res2 = blk->doHeapRegion(chr);
+ if (res2) {
+ return;
+ }
+
+ // Right now, this holds (i.e., no closure that actually
+ // does something with "continues humongous" regions
+ // clears them). We might have to weaken it in the future,
+ // but let's leave these two asserts here for extra safety.
+ assert(chr->continuesHumongous(), "should still be the case");
+ assert(chr->humongous_start_region() == r, "sanity");
+ }
+ }
+
+ bool res = blk->doHeapRegion(r);
+ if (res) {
+ return;
+ }
+ }
+}
+
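+// The claim protocol used by par_iterate() above lets any number of workers
+// walk the same region array safely: a worker only processes a region whose
+// claim word it flips to the round's claim value. A minimal model with
+// std::atomic (humongous ordering elided; each worker would begin at its
+// stripe from start_region_for_worker()):
+#include <atomic>
+#include <cstddef>
+#include <vector>
+
+struct Claimable {
+  std::atomic<int> claim;
+  Claimable() : claim(0) {}
+};
+
+template <typename Process>
+void claim_and_process(std::vector<Claimable>& regions, int claim_value,
+                       Process process) {
+  for (size_t i = 0; i < regions.size(); i++) {
+    int expected = regions[i].claim.load();
+    if (expected == claim_value) {
+      continue;  // already claimed this round
+    }
+    if (regions[i].claim.compare_exchange_strong(expected, claim_value)) {
+      process(i);  // this worker won the claim and owns region i
+    }
+  }
+}
+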
+uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
+ assert(length() > 0, "the region sequence should not be empty");
+ assert(length() <= _allocated_heapregions_length, "invariant");
+ assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
+ assert(num_regions_to_remove < length(), "We should never remove all regions");
+
+ if (num_regions_to_remove == 0) {
+ return 0;
+ }
+
+ uint removed = 0;
+ uint cur = _allocated_heapregions_length - 1;
+ uint idx_last_found = 0;
+ uint num_last_found = 0;
+
+ while ((removed < num_regions_to_remove) &&
+ (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+ uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
+
+ uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+
+ cur -= num_last_found;
+ removed += to_remove;
+ }
+
+ verify_optional();
+
+ return removed;
+}
+
+uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
+ guarantee(start_idx < _allocated_heapregions_length, "checking");
+ guarantee(res_idx != NULL, "checking");
+
+ uint num_regions_found = 0;
+
+ jlong cur = start_idx;
+ while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
+ cur--;
+ }
+ if (cur == -1) {
+ return num_regions_found;
+ }
+ jlong old_cur = cur;
+ // cur indexes the first empty region
+ while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
+ cur--;
+ }
+ *res_idx = cur + 1;
+ num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+ for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+ assert(at(i)->is_empty(), "just checking");
+ }
+#endif
+ return num_regions_found;
+}
+
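+// find_empty_from_idx_reverse() above scans downward from start_idx for the
+// highest run of empty regions, which is what lets shrink_by() uncommit from
+// the top of the heap first. A standalone equivalent returning the run as a
+// (start, length) pair; the signed cursor lets the scan fall off the bottom:
+#include <cstddef>
+#include <utility>
+
+template <typename Empty>
+std::pair<long, size_t> find_run_reverse(long start_idx, Empty is_empty) {
+  long cur = start_idx;
+  while (cur != -1 && !is_empty(cur)) cur--;  // find the highest empty region
+  if (cur == -1) return std::make_pair(-1L, (size_t)0);
+  long last = cur;
+  while (cur != -1 && is_empty(cur)) cur--;   // extend the run downwards
+  return std::make_pair(cur + 1, (size_t)(last - cur));
+}
+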
+void HeapRegionManager::verify() {
+ guarantee(length() <= _allocated_heapregions_length,
+ err_msg("invariant: _length: %u _allocated_length: %u",
+ length(), _allocated_heapregions_length));
+ guarantee(_allocated_heapregions_length <= max_length(),
+ err_msg("invariant: _allocated_length: %u _max_length: %u",
+ _allocated_heapregions_length, max_length()));
+
+ bool prev_committed = true;
+ uint num_committed = 0;
+ HeapWord* prev_end = heap_bottom();
+ for (uint i = 0; i < _allocated_heapregions_length; i++) {
+ if (!is_available(i)) {
+ prev_committed = false;
+ continue;
+ }
+ num_committed++;
+ HeapRegion* hr = _regions.get_by_index(i);
+ guarantee(hr != NULL, err_msg("invariant: i: %u", i));
+ guarantee(!prev_committed || hr->bottom() == prev_end,
+ err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
+ i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
+ guarantee(hr->hrm_index() == i,
+ err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
+ // Asserts will fire if i is >= _length
+ HeapWord* addr = hr->bottom();
+ guarantee(addr_to_region(addr) == hr, "sanity");
+ // We cannot check whether the region is part of a particular set: at the time
+ // this method may be called, we have only completed allocation of the regions,
+ // but not put into a region set.
+ prev_committed = true;
+ if (hr->startsHumongous()) {
+ prev_end = hr->orig_end();
+ } else {
+ prev_end = hr->end();
+ }
+ }
+ for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
+ guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
+ }
+
+ guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+ _free_list.verify();
+}
+
+#ifndef PRODUCT
+void HeapRegionManager::verify_optional() {
+ verify();
+}
+#endif // PRODUCT
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
+
+class HeapRegion;
+class HeapRegionClosure;
+class FreeRegionList;
+
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+ virtual HeapRegion* default_value() const { return NULL; }
+};
+
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom()).
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _num_committed (returned by length()) is the number of currently
+// committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+// index, plus one, of the highest region for which we have allocated a HeapRegion.
+// * max_length() returns the maximum number of regions the heap can have.
+//
+
+class HeapRegionManager: public CHeapObj<mtGC> {
+ friend class VMStructs;
+
+ G1HeapRegionTable _regions;
+
+ G1RegionToSpaceMapper* _heap_mapper;
+ G1RegionToSpaceMapper* _prev_bitmap_mapper;
+ G1RegionToSpaceMapper* _next_bitmap_mapper;
+ G1RegionToSpaceMapper* _bot_mapper;
+ G1RegionToSpaceMapper* _cardtable_mapper;
+ G1RegionToSpaceMapper* _card_counts_mapper;
+
+ FreeRegionList _free_list;
+
+ // Each bit in this bitmap indicates whether the corresponding region is available
+ // for allocation.
+ BitMap _available_map;
+
+ // The number of regions committed in the heap.
+ uint _num_committed;
+
+ // Internal only. The index, plus one, of the highest heap region for which we
+ // have allocated a HeapRegion instance.
+ uint _allocated_heapregions_length;
+
+ HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+ HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
+ void make_regions_available(uint index, uint num_regions = 1);
+
+ // Pass down commit calls to the VirtualSpace.
+ void commit_regions(uint index, size_t num_regions = 1);
+ void uncommit_regions(uint index, size_t num_regions = 1);
+
+ // Notify other data structures about change in the heap layout.
+ void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+ // Calculate the starting region for each worker during parallel iteration so
+ // that they do not all start from the same region.
+ uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
+
+ // Find a contiguous set of empty or uncommitted regions of length num and return
+ // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
+ // If only_empty is true, only empty regions are considered.
+ // Searches from bottom to top of the heap, doing a first-fit.
+ uint find_contiguous(size_t num, bool only_empty);
+ // Finds the next sequence of unavailable regions starting from start_idx. Returns the
+ // length of the sequence found. If this result is zero, no such sequence could be found,
+ // otherwise res_idx indicates the start index of these regions.
+ uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
+ // Finds the next sequence of empty regions starting from start_idx, going backwards in
+ // the heap. Returns the length of the sequence found. If this value is zero, no
+ // sequence could be found, otherwise res_idx contains the start index of this range.
+ uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+ // Allocate a new HeapRegion for the given index.
+ HeapRegion* new_heap_region(uint hrm_index);
+#ifdef ASSERT
+public:
+ bool is_free(HeapRegion* hr) const;
+#endif
+ // Returns whether the given region is available for allocation.
+ bool is_available(uint region) const;
+
+ public:
+ // Empty constructor; we'll initialize it with the initialize() method.
+ HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0),
+ _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
+ _allocated_heapregions_length(0), _available_map(),
+ _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
+ { }
+
+ void initialize(G1RegionToSpaceMapper* heap_storage,
+ G1RegionToSpaceMapper* prev_bitmap,
+ G1RegionToSpaceMapper* next_bitmap,
+ G1RegionToSpaceMapper* bot,
+ G1RegionToSpaceMapper* cardtable,
+ G1RegionToSpaceMapper* card_counts);
+
+ // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
+ // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
+ // the heap from the lowest address, this region (and its associated data
+ // structures) are available and we do not need to check further.
+ HeapRegion* get_dummy_region() { return new_heap_region(0); }
+
+ // Return the HeapRegion at the given index. Assume that the index
+ // is valid.
+ inline HeapRegion* at(uint index) const;
+
+ // If addr is within the committed space, return its corresponding
+ // HeapRegion; otherwise return NULL.
+ inline HeapRegion* addr_to_region(HeapWord* addr) const;
+
+ // Insert the given region into the free region list.
+ inline void insert_into_free_list(HeapRegion* hr);
+
+ // Insert the given region list into the global free region list.
+ void insert_list_into_free_list(FreeRegionList* list) {
+ _free_list.add_ordered(list);
+ }
+
+ HeapRegion* allocate_free_region(bool is_old) {
+ HeapRegion* hr = _free_list.remove_region(is_old);
+
+ if (hr != NULL) {
+ assert(hr->next() == NULL, "Single region should not have next");
+ assert(is_available(hr->hrm_index()), "Must be committed");
+ }
+ return hr;
+ }
+
+ inline void allocate_free_regions_starting_at(uint first, uint num_regions);
+
+ // Remove all regions from the free list.
+ void remove_all_free_regions() {
+ _free_list.remove_all();
+ }
+
+ // Return the number of committed free regions in the heap.
+ uint num_free_regions() const {
+ return _free_list.length();
+ }
+
+ size_t total_capacity_bytes() const {
+ return num_free_regions() * HeapRegion::GrainBytes;
+ }
+
+ // Return the number of available (uncommitted) regions.
+ uint available() const { return max_length() - length(); }
+
+ // Return the number of regions that have been committed in the heap.
+ uint length() const { return _num_committed; }
+
+ // Return the maximum number of regions in the heap.
+ uint max_length() const { return (uint)_regions.length(); }
+
+ MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
+
+ // Expand the sequence to reflect that the heap has grown. Either create new
+ // HeapRegions, or re-use existing ones. Returns the number of regions the
+ // sequence was expanded by. If a HeapRegion allocation fails, the resulting
+ // number of regions might be smaller than what's desired.
+ uint expand_by(uint num_regions);
+
+ // Makes sure that the regions from start to start+num_regions-1 are available
+ // for allocation. Returns the number of regions that were committed to achieve
+ // this.
+ uint expand_at(uint start, uint num_regions);
+
+ // Find a contiguous set of empty regions of length num. Returns the start index of
+ // that set, or G1_NO_HRM_INDEX.
+ uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+ // Find a contiguous set of empty or unavailable regions of length num. Returns the
+ // start index of that set, or G1_NO_HRM_INDEX.
+ uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+
+ HeapRegion* next_region_in_heap(const HeapRegion* r) const;
+
+ // Apply blk->doHeapRegion() on all committed regions in address order,
+ // terminating the iteration early if doHeapRegion() returns true.
+ void iterate(HeapRegionClosure* blk) const;
+
+ void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
+
+ // Uncommit up to num_regions_to_remove regions that are completely free.
+ // Return the actual number of uncommitted regions.
+ uint shrink_by(uint num_regions_to_remove);
+
+ void verify();
+
+ // Do some sanity checking.
+ void verify_optional() PRODUCT_RETURN;
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+
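
find_contiguous(), declared above, does a single first-fit pass from the bottom of the heap: it grows a candidate run while regions qualify and restarts just past the first region that does not. A minimal standalone sketch under assumed names (candidate[] abstracts the "empty or uncommitted" test; UINT32_MAX plays the role of G1_NO_HRM_INDEX):

    #include <cstdint>
    #include <vector>

    // First-fit search for 'num' consecutive candidate regions: extend the
    // current run on a candidate, restart just past any non-candidate.
    // Returns UINT32_MAX when no run is long enough.
    static uint32_t find_contiguous_run(const std::vector<bool>& candidate,
                                        size_t num) {
      uint32_t found = 0;        // start of the current run
      size_t length_found = 0;   // length of the current run
      uint32_t cur = 0;
      while (length_found < num && cur < candidate.size()) {
        if (candidate[cur]) {
          length_found++;        // extend the run
        } else {
          found = cur + 1;       // restart after the non-candidate region
          length_found = 0;
        }
        cur++;
      }
      return (length_found == num) ? found : UINT32_MAX;
    }

    int main() {
      std::vector<bool> c = {true, false, true, true, true};
      return find_contiguous_run(c, 3) == 2 ? 0 : 1;   // run starts at index 2
    }

With candidate meaning "empty or not yet committed" this corresponds to the only_empty == false case used by find_contiguous_empty_or_unavailable(); restricting candidates to committed empty regions gives find_contiguous_only_empty().
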
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.inline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionManager.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
+ assert(addr < heap_end(),
+ err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
+ assert(addr >= heap_bottom(),
+ err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
+
+ HeapRegion* hr = _regions.get_by_address(addr);
+ return hr;
+}
+
+inline HeapRegion* HeapRegionManager::at(uint index) const {
+ assert(is_available(index), "pre-condition");
+ HeapRegion* hr = _regions.get_by_index(index);
+ assert(hr != NULL, "sanity");
+ assert(hr->hrm_index() == index, "sanity");
+ return hr;
+}
+
+inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
+ _free_list.add_ordered(hr);
+}
+
+inline void HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
+ _free_list.remove_starting_at(at(first), num_regions);
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
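
Both inline accessors above bottom out in the G1HeapRegionTable, a G1BiasedMappedArray that converts an address into a region index with a shift and a bias (the _shift_by and _bias fields that the vmStructs_g1.hpp hunk further down exposes to the serviceability agent). A minimal model of that mapping, with illustrative constants and otherwise hypothetical names:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Minimal model of a biased mapped array: index = (addr >> _shift_by) - _bias,
    // so entry 0 corresponds to the region at the bottom of the heap.
    struct BiasedRegionTable {
      uintptr_t base;    // heap_bottom()
      unsigned shift_by; // log2 of the region size in bytes
      size_t bias;       // base >> shift_by

      size_t index_for(uintptr_t addr) const {
        assert(addr >= base && "address below the mapped range");
        return (addr >> shift_by) - bias;
      }
    };

    int main() {
      const unsigned kLogRegionBytes = 20;   // 1 MB regions, for illustration
      uintptr_t bottom = 0x40000000;         // illustrative heap bottom
      BiasedRegionTable t = { bottom, kLogRegionBytes,
                              bottom >> kLogRegionBytes };
      assert(t.index_for(bottom) == 0);
      assert(t.index_for(bottom + (3u << kLogRegionBytes) + 42) == 3);
      return 0;
    }
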
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -27,7 +27,7 @@
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
@@ -420,7 +420,7 @@
}
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
- uint cur_hrs_ind = hr()->hrs_index();
+ uint cur_hrm_ind = hr()->hrm_index();
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -435,10 +435,10 @@
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
- FromCardCache::at((uint)tid, cur_hrs_ind));
+ FromCardCache::at((uint)tid, cur_hrm_ind));
}
- if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
+ if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" from-card cache hit.");
}
@@ -448,10 +448,10 @@
// Note that this may be a continued H region.
HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
- RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
+ RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
// If the region is already coarsened, return.
- if (_coarse_map.at(from_hrs_ind)) {
+ if (_coarse_map.at(from_hrm_ind)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" coarse map hit.");
}
@@ -460,7 +460,7 @@
}
// Otherwise find a per-region table to add it to.
- size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
+ size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
PerRegionTable* prt = find_region_table(ind, from_hr);
if (prt == NULL) {
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
@@ -475,7 +475,7 @@
assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
if (G1HRRSUseSparseTable &&
- _sparse_table.add_card(from_hrs_ind, card_index)) {
+ _sparse_table.add_card(from_hrm_ind, card_index)) {
if (G1RecordHRRSOops) {
HeapRegionRemSet::record(hr(), from);
if (G1TraceHeapRegionRememberedSet) {
@@ -495,7 +495,7 @@
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" [tid %d] sparse table entry "
"overflow(f: %d, t: %u)",
- tid, from_hrs_ind, cur_hrs_ind);
+ tid, from_hrm_ind, cur_hrm_ind);
}
}
@@ -516,7 +516,7 @@
if (G1HRRSUseSparseTable) {
// Transfer from sparse to fine-grain.
- SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
+ SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
assert(sprt_entry != NULL, "There should have been an entry");
for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
CardIdx_t c = sprt_entry->card(i);
@@ -525,7 +525,7 @@
}
}
// Now we can delete the sparse entry.
- bool res = _sparse_table.delete_entry(from_hrs_ind);
+ bool res = _sparse_table.delete_entry(from_hrm_ind);
assert(res, "It should have been there.");
}
}
@@ -607,9 +607,9 @@
guarantee(max != NULL, "Since _n_fine_entries > 0");
// Set the corresponding coarse bit.
- size_t max_hrs_index = (size_t) max->hr()->hrs_index();
- if (!_coarse_map.at(max_hrs_index)) {
- _coarse_map.at_put(max_hrs_index, true);
+ size_t max_hrm_index = (size_t) max->hr()->hrm_index();
+ if (!_coarse_map.at(max_hrm_index)) {
+ _coarse_map.at_put(max_hrm_index, true);
_n_coarse_entries++;
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
@@ -633,7 +633,7 @@
BitMap* region_bm, BitMap* card_bm) {
// First eliminate garbage regions from the coarse map.
if (G1RSScrubVerbose) {
- gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
+ gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
}
assert(_coarse_map.size() == region_bm->size(), "Precondition");
@@ -656,9 +656,9 @@
// If the entire region is dead, eliminate.
if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" For other region %u:",
- cur->hr()->hrs_index());
+ cur->hr()->hrm_index());
}
- if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
+ if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
*prev = nxt;
cur->set_collision_list_next(NULL);
_n_fine_entries--;
@@ -752,7 +752,7 @@
}
void OtherRegionsTable::clear_fcc() {
- FromCardCache::clear(hr()->hrs_index());
+ FromCardCache::clear(hr()->hrm_index());
}
void OtherRegionsTable::clear() {
@@ -803,7 +803,7 @@
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
- RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
+ RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
// Is this region in the coarse map?
if (_coarse_map.at(hr_ind)) return true;
@@ -840,7 +840,7 @@
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
- _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
+ _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
}
@@ -926,9 +926,25 @@
}
// Code roots support
+//
+// The code root set is protected by two separate locking schemes.
+// When at a safepoint, the per-hrrs lock must be held during modifications,
+// except when doing a full gc.
+// When not at a safepoint, the CodeCache_lock must be held during modifications.
+// While concurrent readers access the contains() function (during the
+// evacuation phase), no removals are allowed.
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
+ // Optimistic unlocked contains-check
+ if (!_code_roots.contains(nm)) {
+ MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
+ add_strong_code_root_locked(nm);
+ }
+}
+
+void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
+ assert(nm != NULL, "sanity");
_code_roots.add(nm);
}
@@ -936,96 +952,19 @@
assert(nm != NULL, "sanity");
assert_locked_or_safepoint(CodeCache_lock);
- _code_roots.remove_lock_free(nm);
+ MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
+ _code_roots.remove(nm);
// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
-class NMethodMigrationOopClosure : public OopClosure {
- G1CollectedHeap* _g1h;
- HeapRegion* _from;
- nmethod* _nm;
-
- uint _num_self_forwarded;
-
- template <class T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (_from->is_in(obj)) {
- // Reference still points into the source region.
- // Since roots are immediately evacuated this means that
- // we must have self forwarded the object
- assert(obj->is_forwarded(),
- err_msg("code roots should be immediately evacuated. "
- "Ref: "PTR_FORMAT", "
- "Obj: "PTR_FORMAT", "
- "Region: "HR_FORMAT,
- p, (void*) obj, HR_FORMAT_PARAMS(_from)));
- assert(obj->forwardee() == obj,
- err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
-
- // The object has been self forwarded.
- // Note, if we're during an initial mark pause, there is
- // no need to explicitly mark object. It will be marked
- // during the regular evacuation failure handling code.
- _num_self_forwarded++;
- } else {
- // The reference points into a promotion or to-space region
- HeapRegion* to = _g1h->heap_region_containing(obj);
- to->rem_set()->add_strong_code_root(_nm);
- }
- }
- }
-
-public:
- NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
- _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop(oop* p) { do_oop_work(p); }
-
- uint retain() { return _num_self_forwarded > 0; }
-};
-
-void HeapRegionRemSet::migrate_strong_code_roots() {
- assert(hr()->in_collection_set(), "only collection set regions");
- assert(!hr()->isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
- HR_FORMAT_PARAMS(hr())));
-
- ResourceMark rm;
-
- // List of code blobs to retain for this region
- GrowableArray<nmethod*> to_be_retained(10);
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
- while (!_code_roots.is_empty()) {
- nmethod *nm = _code_roots.pop();
- if (nm != NULL) {
- NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
- nm->oops_do(&oop_cl);
- if (oop_cl.retain()) {
- to_be_retained.push(nm);
- }
- }
- }
-
- // Now push any code roots we need to retain
- assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
- "Retained nmethod list must be empty or "
- "evacuation of this region failed");
-
- while (to_be_retained.is_nonempty()) {
- nmethod* nm = to_be_retained.pop();
- assert(nm != NULL, "sanity");
- add_strong_code_root(nm);
- }
+void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
+ _code_roots.nmethods_do(blk);
}
-void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
- _code_roots.nmethods_do(blk);
+void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
+ _code_roots.clean(hr);
}
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
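
The reworked add_strong_code_root() above is a check-then-lock pattern: an optimistic unlocked contains() probe skips the lock in the common case where the nmethod is already registered, and only a miss takes the per-remset lock and funnels through the _locked variant. A rough standalone analogue (std::mutex and std::set are stand-ins; this only illustrates the control flow, since the real G1CodeRootSet must tolerate unlocked concurrent readers, which std::set does not guarantee):

    #include <mutex>
    #include <set>

    // Sketch of the optimistic add path: probe without the lock, take it
    // only on a miss.
    class CodeRootSetSketch {
      std::mutex _m;
      std::set<const void*> _roots;   // stand-in for the nmethod* set

    public:
      bool contains(const void* nm) const { return _roots.count(nm) != 0; }

      void add(const void* nm) {
        if (!contains(nm)) {              // optimistic unlocked contains-check
          std::lock_guard<std::mutex> ml(_m);
          add_locked(nm);                 // re-lookup is implicit in set::insert
        }
      }

      void add_locked(const void* nm) { _roots.insert(nm); }
    };
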
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -349,13 +349,13 @@
// Returns the memory occupancy of all static data structures associated
// with remembered sets.
static size_t static_mem_size() {
- return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size();
+ return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
}
// Returns the memory occupancy of all free_list data structures associated
// with remembered sets.
static size_t fl_mem_size() {
- return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size();
+ return OtherRegionsTable::fl_mem_size();
}
bool contains_reference(OopOrNarrowOopStar from) const {
@@ -365,18 +365,15 @@
// Routines for managing the list of code roots that point into
// the heap region that owns this RSet.
void add_strong_code_root(nmethod* nm);
+ void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
- // During a collection, migrate the successfully evacuated strong
- // code roots that referenced into the region that owns this RSet
- // to the RSets of the new regions that they now point into.
- // Unsuccessfully evacuated code roots are not migrated.
- void migrate_strong_code_roots();
-
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list
void strong_code_roots_do(CodeBlobClosure* blk) const;
+ void clean_strong_code_roots(HeapRegion* hr);
+
// Returns the number of elements in the strong code roots list
size_t strong_code_roots_list_length() const {
return _code_roots.length();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Mon Sep 08 16:05:48 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,450 +0,0 @@
-/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "gc_implementation/g1/heapRegionSet.inline.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/concurrentG1Refine.hpp"
-#include "memory/allocation.hpp"
-
-void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
- G1RegionToSpaceMapper* prev_bitmap,
- G1RegionToSpaceMapper* next_bitmap,
- G1RegionToSpaceMapper* bot,
- G1RegionToSpaceMapper* cardtable,
- G1RegionToSpaceMapper* card_counts) {
- _allocated_heapregions_length = 0;
-
- _heap_mapper = heap_storage;
-
- _prev_bitmap_mapper = prev_bitmap;
- _next_bitmap_mapper = next_bitmap;
-
- _bot_mapper = bot;
- _cardtable_mapper = cardtable;
-
- _card_counts_mapper = card_counts;
-
- MemRegion reserved = heap_storage->reserved();
- _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
-
- _available_map.resize(_regions.length(), false);
- _available_map.clear();
-}
-
-bool HeapRegionSeq::is_available(uint region) const {
- return _available_map.at(region);
-}
-
-#ifdef ASSERT
-bool HeapRegionSeq::is_free(HeapRegion* hr) const {
- return _free_list.contains(hr);
-}
-#endif
-
-HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
- HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
- MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
- assert(reserved().contains(mr), "invariant");
- return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
-}
-
-void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
- guarantee(num_regions > 0, "Must commit more than zero regions");
- guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
-
- _num_committed += (uint)num_regions;
-
- _heap_mapper->commit_regions(index, num_regions);
-
- // Also commit auxiliary data
- _prev_bitmap_mapper->commit_regions(index, num_regions);
- _next_bitmap_mapper->commit_regions(index, num_regions);
-
- _bot_mapper->commit_regions(index, num_regions);
- _cardtable_mapper->commit_regions(index, num_regions);
-
- _card_counts_mapper->commit_regions(index, num_regions);
-}
-
-void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
- guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
- guarantee(_num_committed >= num_regions, "pre-condition");
-
- // Print before uncommitting.
- if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
- for (uint i = start; i < start + num_regions; i++) {
- HeapRegion* hr = at(i);
- G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
- }
- }
-
- _num_committed -= (uint)num_regions;
-
- _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
- _heap_mapper->uncommit_regions(start, num_regions);
-
- // Also uncommit auxiliary data
- _prev_bitmap_mapper->uncommit_regions(start, num_regions);
- _next_bitmap_mapper->uncommit_regions(start, num_regions);
-
- _bot_mapper->uncommit_regions(start, num_regions);
- _cardtable_mapper->uncommit_regions(start, num_regions);
-
- _card_counts_mapper->uncommit_regions(start, num_regions);
-}
-
-void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
- guarantee(num_regions > 0, "No point in calling this for zero regions");
- commit_regions(start, num_regions);
- for (uint i = start; i < start + num_regions; i++) {
- if (_regions.get_by_index(i) == NULL) {
- HeapRegion* new_hr = new_heap_region(i);
- _regions.set_by_index(i, new_hr);
- _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
- }
- }
-
- _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
-
- for (uint i = start; i < start + num_regions; i++) {
- assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
- HeapRegion* hr = at(i);
- if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
- G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
- }
- HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
- MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
-
- hr->initialize(mr);
- insert_into_free_list(at(i));
- }
-}
-
-uint HeapRegionSeq::expand_by(uint num_regions) {
- return expand_at(0, num_regions);
-}
-
-uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
- if (num_regions == 0) {
- return 0;
- }
-
- uint cur = start;
- uint idx_last_found = 0;
- uint num_last_found = 0;
-
- uint expanded = 0;
-
- while (expanded < num_regions &&
- (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
- uint to_expand = MIN2(num_regions - expanded, num_last_found);
- make_regions_available(idx_last_found, to_expand);
- expanded += to_expand;
- cur = idx_last_found + num_last_found + 1;
- }
-
- verify_optional();
- return expanded;
-}
-
-uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
- uint found = 0;
- size_t length_found = 0;
- uint cur = 0;
-
- while (length_found < num && cur < max_length()) {
- HeapRegion* hr = _regions.get_by_index(cur);
- if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
- // This region is a potential candidate for allocation into.
- length_found++;
- } else {
- // This region is not a candidate. The next region is the next possible one.
- found = cur + 1;
- length_found = 0;
- }
- cur++;
- }
-
- if (length_found == num) {
- for (uint i = found; i < (found + num); i++) {
- HeapRegion* hr = _regions.get_by_index(i);
- // sanity check
- guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
- err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
- " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
- }
- return found;
- } else {
- return G1_NO_HRS_INDEX;
- }
-}
-
-HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
- guarantee(r != NULL, "Start region must be a valid region");
- guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
- for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
- HeapRegion* hr = _regions.get_by_index(i);
- if (is_available(i)) {
- return hr;
- }
- }
- return NULL;
-}
-
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
- uint len = max_length();
-
- for (uint i = 0; i < len; i++) {
- if (!is_available(i)) {
- continue;
- }
- guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
- bool res = blk->doHeapRegion(at(i));
- if (res) {
- blk->incomplete();
- return;
- }
- }
-}
-
-uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
- guarantee(res_idx != NULL, "checking");
- guarantee(start_idx <= (max_length() + 1), "checking");
-
- uint num_regions = 0;
-
- uint cur = start_idx;
- while (cur < max_length() && is_available(cur)) {
- cur++;
- }
- if (cur == max_length()) {
- return num_regions;
- }
- *res_idx = cur;
- while (cur < max_length() && !is_available(cur)) {
- cur++;
- }
- num_regions = cur - *res_idx;
-#ifdef ASSERT
- for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
- assert(!is_available(i), "just checking");
- }
- assert(cur == max_length() || num_regions == 0 || is_available(cur),
- err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
-#endif
- return num_regions;
-}
-
-uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
- return num_regions * worker_i / num_workers;
-}
-
-void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
- const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
-
- // Every worker will actually look at all regions, skipping over regions that
- // are currently not committed.
- // This also (potentially) iterates over regions newly allocated during GC. This
- // is no problem except for some extra work.
- for (uint count = 0; count < _allocated_heapregions_length; count++) {
- const uint index = (start_index + count) % _allocated_heapregions_length;
- assert(0 <= index && index < _allocated_heapregions_length, "sanity");
- // Skip over unavailable regions
- if (!is_available(index)) {
- continue;
- }
- HeapRegion* r = _regions.get_by_index(index);
- // We'll ignore "continues humongous" regions (we'll process them
- // when we come across their corresponding "start humongous"
- // region) and regions already claimed.
- if (r->claim_value() == claim_value || r->continuesHumongous()) {
- continue;
- }
- // OK, try to claim it
- if (!r->claimHeapRegion(claim_value)) {
- continue;
- }
- // Success!
- if (r->startsHumongous()) {
- // If the region is "starts humongous" we'll iterate over its
- // "continues humongous" first; in fact we'll do them
- // first. The order is important. In one case, calling the
- // closure on the "starts humongous" region might de-allocate
- // and clear all its "continues humongous" regions and, as a
- // result, we might end up processing them twice. So, we'll do
- // them first (note: most closures will ignore them anyway) and
- // then we'll do the "starts humongous" region.
- for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
- HeapRegion* chr = _regions.get_by_index(ch_index);
-
- assert(chr->continuesHumongous(), "Must be humongous region");
- assert(chr->humongous_start_region() == r,
- err_msg("Must work on humongous continuation of the original start region "
- PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
- assert(chr->claim_value() != claim_value,
- "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
-
- bool claim_result = chr->claimHeapRegion(claim_value);
- // We should always be able to claim it; no one else should
- // be trying to claim this region.
- guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
-
- bool res2 = blk->doHeapRegion(chr);
- if (res2) {
- return;
- }
-
- // Right now, this holds (i.e., no closure that actually
- // does something with "continues humongous" regions
- // clears them). We might have to weaken it in the future,
- // but let's leave these two asserts here for extra safety.
- assert(chr->continuesHumongous(), "should still be the case");
- assert(chr->humongous_start_region() == r, "sanity");
- }
- }
-
- bool res = blk->doHeapRegion(r);
- if (res) {
- return;
- }
- }
-}
-
-uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
- assert(length() > 0, "the region sequence should not be empty");
- assert(length() <= _allocated_heapregions_length, "invariant");
- assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
- assert(num_regions_to_remove < length(), "We should never remove all regions");
-
- if (num_regions_to_remove == 0) {
- return 0;
- }
-
- uint removed = 0;
- uint cur = _allocated_heapregions_length - 1;
- uint idx_last_found = 0;
- uint num_last_found = 0;
-
- while ((removed < num_regions_to_remove) &&
- (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
- // Only allow uncommit from the end of the heap.
- if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
- return 0;
- }
- uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
-
- uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
-
- cur -= num_last_found;
- removed += to_remove;
- }
-
- verify_optional();
-
- return removed;
-}
-
-uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
- guarantee(start_idx < _allocated_heapregions_length, "checking");
- guarantee(res_idx != NULL, "checking");
-
- uint num_regions_found = 0;
-
- jlong cur = start_idx;
- while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
- cur--;
- }
- if (cur == -1) {
- return num_regions_found;
- }
- jlong old_cur = cur;
- // cur indexes the first empty region
- while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
- cur--;
- }
- *res_idx = cur + 1;
- num_regions_found = old_cur - cur;
-
-#ifdef ASSERT
- for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
- assert(at(i)->is_empty(), "just checking");
- }
-#endif
- return num_regions_found;
-}
-
-void HeapRegionSeq::verify() {
- guarantee(length() <= _allocated_heapregions_length,
- err_msg("invariant: _length: %u _allocated_length: %u",
- length(), _allocated_heapregions_length));
- guarantee(_allocated_heapregions_length <= max_length(),
- err_msg("invariant: _allocated_length: %u _max_length: %u",
- _allocated_heapregions_length, max_length()));
-
- bool prev_committed = true;
- uint num_committed = 0;
- HeapWord* prev_end = heap_bottom();
- for (uint i = 0; i < _allocated_heapregions_length; i++) {
- if (!is_available(i)) {
- prev_committed = false;
- continue;
- }
- num_committed++;
- HeapRegion* hr = _regions.get_by_index(i);
- guarantee(hr != NULL, err_msg("invariant: i: %u", i));
- guarantee(!prev_committed || hr->bottom() == prev_end,
- err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
- i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
- guarantee(hr->hrs_index() == i,
- err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
- // Asserts will fire if i is >= _length
- HeapWord* addr = hr->bottom();
- guarantee(addr_to_region(addr) == hr, "sanity");
- // We cannot check whether the region is part of a particular set: at the time
- // this method may be called, we have only completed allocation of the regions,
- // but not put into a region set.
- prev_committed = true;
- if (hr->startsHumongous()) {
- prev_end = hr->orig_end();
- } else {
- prev_end = hr->end();
- }
- }
- for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
- guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
- }
-
- guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
- _free_list.verify();
-}
-
-#ifndef PRODUCT
-void HeapRegionSeq::verify_optional() {
- verify();
-}
-#endif // PRODUCT
-
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Mon Sep 08 16:05:48 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
-
-#include "gc_implementation/g1/g1BiasedArray.hpp"
-#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
-#include "gc_implementation/g1/heapRegionSet.hpp"
-
-class HeapRegion;
-class HeapRegionClosure;
-class FreeRegionList;
-
-class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
- protected:
- virtual HeapRegion* default_value() const { return NULL; }
-};
-
-// This class keeps track of the actual heap memory, auxiliary data
-// and its metadata (i.e., HeapRegion instances) and the list of free regions.
-//
-// This allows maximum flexibility for deciding what to commit or uncommit given
-// a request from outside.
-//
-// HeapRegions are kept in the _regions array in address order. A region's
-// index in the array corresponds to its index in the heap (i.e., 0 is the
-// region at the bottom of the heap, 1 is the one after it, etc.). Two
-// regions that are consecutive in the array should also be adjacent in the
-// address space (i.e., region(i).end() == region(i+1).bottom().
-//
-// We create a HeapRegion when we commit the region's address space
-// for the first time. When we uncommit the address space of a
-// region we retain the HeapRegion to be able to re-use it in the
-// future (in case we recommit it).
-//
-// We keep track of three lengths:
-//
-// * _num_committed (returned by length()) is the number of currently
-// committed regions. These may not be contiguous.
-// * _allocated_heapregions_length (not exposed outside this class) is the
-// number of regions+1 for which we have HeapRegions.
-// * max_length() returns the maximum number of regions the heap can have.
-//
-
-class HeapRegionSeq: public CHeapObj<mtGC> {
- friend class VMStructs;
-
- G1HeapRegionTable _regions;
-
- G1RegionToSpaceMapper* _heap_mapper;
- G1RegionToSpaceMapper* _prev_bitmap_mapper;
- G1RegionToSpaceMapper* _next_bitmap_mapper;
- G1RegionToSpaceMapper* _bot_mapper;
- G1RegionToSpaceMapper* _cardtable_mapper;
- G1RegionToSpaceMapper* _card_counts_mapper;
-
- FreeRegionList _free_list;
-
- // Each bit in this bitmap indicates that the corresponding region is available
- // for allocation.
- BitMap _available_map;
-
- // The number of regions committed in the heap.
- uint _num_committed;
-
- // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
- uint _allocated_heapregions_length;
-
- HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
- HeapWord* heap_end() const {return _regions.end_address_mapped(); }
-
- void make_regions_available(uint index, uint num_regions = 1);
-
- // Pass down commit calls to the VirtualSpace.
- void commit_regions(uint index, size_t num_regions = 1);
- void uncommit_regions(uint index, size_t num_regions = 1);
-
- // Notify other data structures about change in the heap layout.
- void update_committed_space(HeapWord* old_end, HeapWord* new_end);
- // Calculate the starting region for each worker during parallel iteration so
- // that they do not all start from the same region.
- uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
-
- // Find a contiguous set of empty or uncommitted regions of length num and return
- // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
- // If only_empty is true, only empty regions are considered.
- // Searches from bottom to top of the heap, doing a first-fit.
- uint find_contiguous(size_t num, bool only_empty);
- // Finds the next sequence of unavailable regions starting from start_idx. Returns the
- // length of the sequence found. If this result is zero, no such sequence could be found,
- // otherwise res_idx indicates the start index of these regions.
- uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
- // Finds the next sequence of empty regions starting from start_idx, going backwards in
- // the heap. Returns the length of the sequence found. If this value is zero, no
- // sequence could be found, otherwise res_idx contains the start index of this range.
- uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
- // Allocate a new HeapRegion for the given index.
- HeapRegion* new_heap_region(uint hrs_index);
-#ifdef ASSERT
-public:
- bool is_free(HeapRegion* hr) const;
-#endif
- // Returns whether the given region is available for allocation.
- bool is_available(uint region) const;
-
- public:
- // Empty constructor, we'll initialize it with the initialize() method.
- HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
- _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
- _allocated_heapregions_length(0), _available_map(),
- _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
- { }
-
- void initialize(G1RegionToSpaceMapper* heap_storage,
- G1RegionToSpaceMapper* prev_bitmap,
- G1RegionToSpaceMapper* next_bitmap,
- G1RegionToSpaceMapper* bot,
- G1RegionToSpaceMapper* cardtable,
- G1RegionToSpaceMapper* card_counts);
-
- // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
- // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
- // the heap from the lowest address, this region (and its associated data
- // structures) are available and we do not need to check further.
- HeapRegion* get_dummy_region() { return new_heap_region(0); }
-
- // Return the HeapRegion at the given index. Assume that the index
- // is valid.
- inline HeapRegion* at(uint index) const;
-
- // If addr is within the committed space return its corresponding
- // HeapRegion, otherwise return NULL.
- inline HeapRegion* addr_to_region(HeapWord* addr) const;
-
- // Insert the given region into the free region list.
- inline void insert_into_free_list(HeapRegion* hr);
-
- // Insert the given region list into the global free region list.
- void insert_list_into_free_list(FreeRegionList* list) {
- _free_list.add_ordered(list);
- }
-
- HeapRegion* allocate_free_region(bool is_old) {
- HeapRegion* hr = _free_list.remove_region(is_old);
-
- if (hr != NULL) {
- assert(hr->next() == NULL, "Single region should not have next");
- assert(is_available(hr->hrs_index()), "Must be committed");
- }
- return hr;
- }
-
- inline void allocate_free_regions_starting_at(uint first, uint num_regions);
-
- // Remove all regions from the free list.
- void remove_all_free_regions() {
- _free_list.remove_all();
- }
-
- // Return the number of committed free regions in the heap.
- uint num_free_regions() const {
- return _free_list.length();
- }
-
- size_t total_capacity_bytes() const {
- return num_free_regions() * HeapRegion::GrainBytes;
- }
-
- // Return the number of available (uncommitted) regions.
- uint available() const { return max_length() - length(); }
-
- // Return the number of regions that have been committed in the heap.
- uint length() const { return _num_committed; }
-
- // Return the maximum number of regions in the heap.
- uint max_length() const { return (uint)_regions.length(); }
-
- MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
-
- // Expand the sequence to reflect that the heap has grown. Either create new
- // HeapRegions, or re-use existing ones. Returns the number of regions the
- // sequence was expanded by. If a HeapRegion allocation fails, the resulting
- // number of regions might be smaller than what's desired.
- uint expand_by(uint num_regions);
-
- // Makes sure that the regions from start to start+num_regions-1 are available
- // for allocation. Returns the number of regions that were committed to achieve
- // this.
- uint expand_at(uint start, uint num_regions);
-
- // Find a contiguous set of empty regions of length num. Returns the start index of
- // that set, or G1_NO_HRS_INDEX.
- uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
- // Find a contiguous set of empty or unavailable regions of length num. Returns the
- // start index of that set, or G1_NO_HRS_INDEX.
- uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
-
- HeapRegion* next_region_in_heap(const HeapRegion* r) const;
-
- // Apply blk->doHeapRegion() on all committed regions in address order,
- // terminating the iteration early if doHeapRegion() returns true.
- void iterate(HeapRegionClosure* blk) const;
-
- void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
-
- // Uncommit up to num_regions_to_remove regions that are completely free.
- // Return the actual number of uncommitted regions.
- uint shrink_by(uint num_regions_to_remove);
-
- void verify();
-
- // Do some sanity checking.
- void verify_optional() PRODUCT_RETURN;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
-
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
-
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
-#include "gc_implementation/g1/heapRegionSet.inline.hpp"
-
-inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
- assert(addr < heap_end(),
- err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
- assert(addr >= heap_bottom(),
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
-
- HeapRegion* hr = _regions.get_by_address(addr);
- return hr;
-}
-
-inline HeapRegion* HeapRegionSeq::at(uint index) const {
- assert(is_available(index), "pre-condition");
- HeapRegion* hr = _regions.get_by_index(index);
- assert(hr != NULL, "sanity");
- assert(hr->hrs_index() == index, "sanity");
- return hr;
-}
-
-inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
- _free_list.add_ordered(hr);
-}
-
-inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
- _free_list.remove_starting_at(at(first), num_regions);
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -39,11 +39,11 @@
#ifndef PRODUCT
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
- assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index()));
- assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions
- assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name()));
- assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name()));
- assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index()));
+ assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
+ assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
+ assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
+ assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrm_index(), name()));
+ assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
}
#endif
@@ -158,7 +158,7 @@
HeapRegion* curr_from = from_list->_head;
while (curr_from != NULL) {
- while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
+ while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) {
curr_to = curr_to->next();
}
@@ -183,7 +183,7 @@
}
}
- if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
+ if (_tail->hrm_index() < from_list->_tail->hrm_index()) {
_tail = from_list->_tail;
}
}
@@ -309,8 +309,8 @@
if (curr->next() != NULL) {
guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
}
- guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted");
- last_index = curr->hrs_index();
+ guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted");
+ last_index = curr->hrm_index();
capacity += curr->capacity();
@@ -319,7 +319,7 @@
curr = curr->next();
}
- guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
+ guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index()));
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -238,14 +238,14 @@
// Add hr to the list. The region should not be a member of another set.
// Assumes that the list is ordered and will preserve that order. The order
- // is determined by hrs_index.
+ // is determined by hrm_index.
inline void add_ordered(HeapRegion* hr);
// Removes from head or tail based on the given argument.
HeapRegion* remove_region(bool from_head);
// Merge two ordered lists. The result is also ordered. The order is
- // determined by hrs_index.
+ // determined by hrm_index.
void add_ordered(FreeRegionList* from_list);
// It empties the list by removing all regions from it.
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -60,14 +60,14 @@
if (_head != NULL) {
HeapRegion* curr;
- if (_last != NULL && _last->hrs_index() < hr->hrs_index()) {
+ if (_last != NULL && _last->hrm_index() < hr->hrm_index()) {
curr = _last;
} else {
curr = _head;
}
// Find first entry with a Region Index larger than entry to insert.
- while (curr != NULL && curr->hrs_index() < hr->hrs_index()) {
+ while (curr != NULL && curr->hrm_index() < hr->hrm_index()) {
curr = curr->next();
}
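
The add_ordered() hunk above keeps the free list sorted by hrm_index() and uses the most recently inserted node (_last) as a scan hint, so regions arriving in ascending order insert without rescanning from the head. A minimal doubly-linked sketch of that hint under assumed names (index stands in for hrm_index()):

    #include <cstdint>

    struct Node {
      uint32_t index;        // stands in for hrm_index()
      Node* next = nullptr;
      Node* prev = nullptr;
    };

    struct OrderedList {
      Node* head = nullptr;
      Node* tail = nullptr;
      Node* last = nullptr;  // hint: most recently inserted node

      // Insert keeping ascending index order; start scanning at the hint
      // when the new node sorts after it.
      void add_ordered(Node* n) {
        Node* curr = (last != nullptr && last->index < n->index) ? last : head;
        while (curr != nullptr && curr->index < n->index) {
          curr = curr->next;           // find first entry with a larger index
        }
        n->next = curr;
        n->prev = (curr != nullptr) ? curr->prev : tail;
        if (n->prev != nullptr) { n->prev->next = n; } else { head = n; }
        if (curr != nullptr)    { curr->prev = n;    } else { tail = n; }
        last = n;                      // remember the hint for the next insert
      }
    };

    int main() {
      Node a{2}, b{1}, c{3};
      OrderedList l;
      l.add_ordered(&a);
      l.add_ordered(&b);
      l.add_ordered(&c);
      // list is now 1 -> 2 -> 3
      return 0;
    }
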
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -453,7 +453,7 @@
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.",
- card_index, region_id, _hr->hrs_index());
+ card_index, region_id, _hr->hrm_index());
#endif
if (_next->occupied_entries() * 2 > _next->capacity()) {
expand();
@@ -505,7 +505,7 @@
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.",
- _hr->hrs_index(), _next->capacity());
+ _hr->hrm_index(), _next->capacity());
#endif
for (size_t i = 0; i < last->capacity(); i++) {
SparsePRTEntry* e = last->entry((int)i);
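
For context, the add_card() hunk above sits next to SparsePRT's growth policy: the table expands as soon as it is more than half full (occupied_entries() * 2 > capacity()), checked before the insertion. A toy version of that trigger, with all names hypothetical:

    #include <cstddef>

    // Toy version of the SparsePRT growth trigger: expand once the table is
    // more than half full, checked before each insertion.
    class SparseTableSketch {
      size_t _occupied;
      size_t _capacity;

    public:
      explicit SparseTableSketch(size_t cap) : _occupied(0), _capacity(cap) {}

      void add_card() {
        if (_occupied * 2 > _capacity) {
          expand();          // the real code rehashes its entries into _next here
        }
        _occupied++;         // stand-in for inserting the actual entry
      }

    private:
      void expand() { _capacity *= 2; }
    };
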
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
@@ -42,10 +42,10 @@
nonstatic_field(G1HeapRegionTable, _bias, size_t) \
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
- nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
- nonstatic_field(HeapRegionSeq, _num_committed, uint) \
+ nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
+ nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
- nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
+ nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
@@ -72,7 +72,7 @@
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
- declare_toplevel_type(HeapRegionSeq) \
+ declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -30,6 +30,8 @@
PSGenerationCounters::PSGenerationCounters(const char* name,
int ordinal, int spaces,
+ size_t min_capacity,
+ size_t max_capacity,
PSVirtualSpace* v):
_ps_virtual_space(v) {
@@ -52,11 +54,11 @@
cname = PerfDataManager::counter_name(_name_space, "minCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- _ps_virtual_space->committed_size(), CHECK);
+ min_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- _ps_virtual_space->reserved_size(), CHECK);
+ max_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "capacity");
_current_size = PerfDataManager::create_variable(SUN_GC, cname,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -41,7 +41,7 @@
public:
PSGenerationCounters(const char* name, int ordinal, int spaces,
- PSVirtualSpace* v);
+ size_t min_capacity, size_t max_capacity, PSVirtualSpace* v);
void update_all() {
assert(_virtual_space == NULL, "Only one should be in use");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -149,8 +149,8 @@
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
// Generation Counters, generation 'level', 1 subspace
- _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
- virtual_space());
+ _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
+ _max_gen_size, virtual_space());
_space_counters = new SpaceCounters(perf_data_name, 0,
virtual_space()->reserved_size(),
_object_space, _gen_counters);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -101,7 +101,8 @@
}
// Generation Counters - generation 0, 3 subspaces
- _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);
+ _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
+ _max_gen_size, _virtual_space);
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -62,11 +62,12 @@
GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
+ size_t min_capacity, size_t max_capacity,
VirtualSpace* v)
: _virtual_space(v) {
assert(v != NULL, "don't call this constructor if v == NULL");
initialize(name, ordinal, spaces,
- v->committed_size(), v->reserved_size(), v->committed_size());
+ min_capacity, max_capacity, v->committed_size());
}
GenerationCounters::GenerationCounters(const char* name,
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -66,7 +66,7 @@
public:
GenerationCounters(const char* name, int ordinal, int spaces,
- VirtualSpace* v);
+ size_t min_capacity, size_t max_capacity, VirtualSpace* v);
~GenerationCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -214,9 +214,11 @@
_max_eden_size = size - (2*_max_survivor_size);
// allocate the performance counters
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation counters -- generation 0, 3 subspaces
- _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
+ _gen_counters = new GenerationCounters("new", 0, 3,
+ gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
_gc_counters = new CollectorCounters(policy, 0);
_eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
--- a/hotspot/src/share/vm/memory/filemap.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/filemap.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -445,7 +445,7 @@
// close and remove the file. See bug 6372906.
close();
remove(_full_path);
- fail_stop("Unable to write to shared archive file.", NULL);
+ fail_stop("Unable to write to shared archive file.");
}
}
_file_offset += nbytes;
@@ -463,7 +463,7 @@
// that the written file is the correct length.
_file_offset -= 1;
if (lseek(_fd, _file_offset, SEEK_SET) < 0) {
- fail_stop("Unable to seek.", NULL);
+ fail_stop("Unable to seek.");
}
char zero = 0;
write_bytes(&zero, 1);
@@ -534,7 +534,7 @@
// other reserved memory (like the code cache).
ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
if (!rs.is_reserved()) {
- fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
+ fail_continue("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr);
return rs;
}
// the reserved virtual memory is for mapping class data sharing archive
@@ -558,7 +558,7 @@
requested_addr, size, si->_read_only,
si->_allow_exec);
if (base == NULL || base != si->_base) {
- fail_continue(err_msg("Unable to map %s shared space at required address.", shared_region_name[i]));
+ fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
return NULL;
}
#ifdef _WINDOWS
@@ -584,7 +584,7 @@
void FileMapInfo::assert_mark(bool check) {
if (!check) {
- fail_stop("Mark mismatch while restoring from shared file.", NULL);
+ fail_stop("Mark mismatch while restoring from shared file.");
}
}
@@ -709,7 +709,7 @@
void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
FileMapInfo *map_info = FileMapInfo::current_info();
if (map_info) {
- map_info->fail_continue(msg);
+ map_info->fail_continue("%s", msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (map_info->_header->_space[i]._base != NULL) {
map_info->unmap_region(i);
@@ -717,6 +717,6 @@
}
}
} else if (DumpSharedSpaces) {
- fail_stop(msg, NULL);
+ fail_stop("%s", msg);
}
}
--- a/hotspot/src/share/vm/memory/filemap.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/filemap.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -190,8 +190,8 @@
bool remap_shared_readonly_as_readwrite();
// Errors.
- static void fail_stop(const char *msg, ...);
- static void fail_continue(const char *msg, ...);
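+ // ATTRIBUTE_PRINTF lets the compiler check the format string against the
+ // arguments (the format is parameter 1, the varargs start at parameter 2).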
+ static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
+ static void fail_continue(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
--- a/hotspot/src/share/vm/memory/freeList.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/freeList.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -34,7 +34,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#endif // INCLUDE_ALL_GCS
// Free list. A FreeList is used to access a linked list of chunks
@@ -333,5 +332,4 @@
template class FreeList<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
-template class FreeList<G1CodeRootChunk>;
#endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -3126,6 +3126,8 @@
if (DumpSharedSpaces) {
#if INCLUDE_CDS
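+ // Estimate how big the shared regions need to be and, unless the user set
+ // them on the command line, adjust their default sizes accordingly.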
+ MetaspaceShared::estimate_regions_size();
+
SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -816,6 +816,7 @@
//tty->print_cr("Preload failed: %s", class_name);
}
}
+ fclose(file);
} else {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -1086,3 +1087,49 @@
}
return true;
}
+
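+// Count the classes listed in the given classlist file; lines starting
+// with '#' are comments and are skipped.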
+int MetaspaceShared::count_class(const char* classlist_file) {
+ if (classlist_file == NULL) {
+ return 0;
+ }
+ char class_name[256];
+ int class_count = 0;
+ FILE* file = fopen(classlist_file, "r");
+ if (file != NULL) {
+ while ((fgets(class_name, sizeof class_name, file)) != NULL) {
+ if (*class_name == '#') { // comment
+ continue;
+ }
+ class_count++;
+ }
+ fclose(file);
+ } else {
+ char errmsg[JVM_MAXPATHLEN];
+ os::lasterror(errmsg, JVM_MAXPATHLEN);
+ tty->print_cr("Loading classlist failed: %s", errmsg);
+ exit(1);
+ }
+
+ return class_count;
+}
+
+// These estimated sizes suit typical large applications that have a lot of
+// shared classes.
+void MetaspaceShared::estimate_regions_size() {
+ int class_count = count_class(SharedClassListFile);
+ class_count += count_class(ExtraSharedClassListFile);
+
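+ // Pick the Large or Huge preset depending on how many classes will be archived.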
+ if (class_count > LargeThresholdClassCount) {
+ if (class_count < HugeThresholdClassCount) {
+ SET_ESTIMATED_SIZE(Large, ReadOnly);
+ SET_ESTIMATED_SIZE(Large, ReadWrite);
+ SET_ESTIMATED_SIZE(Large, MiscData);
+ SET_ESTIMATED_SIZE(Large, MiscCode);
+ } else {
+ SET_ESTIMATED_SIZE(Huge, ReadOnly);
+ SET_ESTIMATED_SIZE(Huge, ReadWrite);
+ SET_ESTIMATED_SIZE(Huge, MiscData);
+ SET_ESTIMATED_SIZE(Huge, MiscCode);
+ }
+ }
+}
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -30,6 +30,19 @@
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
+#define LargeSharedArchiveSize (300*M)
+#define HugeSharedArchiveSize (800*M)
+#define ReadOnlyRegionPercentage 0.4
+#define ReadWriteRegionPercentage 0.55
+#define MiscDataRegionPercentage 0.03
+#define MiscCodeRegionPercentage 0.02
+#define LargeThresholdClassCount 5000
+#define HugeThresholdClassCount 40000
+
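+// Sets Shared<region>Size to the given fraction of the estimated archive size,
+// unless the user already set the flag on the command line (token pasting
+// builds the flag name, e.g. SharedReadOnlySize).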
+#define SET_ESTIMATED_SIZE(type, region) \
+ Shared ##region## Size = FLAG_IS_DEFAULT(Shared ##region## Size) ? \
+ (uintx)(type ## SharedArchiveSize * region ## RegionPercentage) : Shared ## region ## Size
+
class FileMapInfo;
// Class Data Sharing Support
@@ -112,5 +125,8 @@
static void link_one_shared_class(Klass* obj, TRAPS);
static void check_one_shared_class(Klass* obj);
static void link_and_cleanup_shared_classes(TRAPS);
+
+ static int count_class(const char* classlist_file);
+ static void estimate_regions_size() NOT_CDS_RETURN;
};
#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -53,9 +53,11 @@
// initialize performance counters
const char* gen_name = "old";
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation Counters -- generation 1, 1 subspace
- _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+ _gen_counters = new GenerationCounters(gen_name, 1, 1,
+ gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
_gc_counters = new CollectorCounters("MSC", 1);
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -78,7 +78,6 @@
set_dimension(1);
set_higher_dimension(NULL);
set_lower_dimension(NULL);
- set_component_mirror(NULL);
// Arrays don't add any new methods, so their vtable is the same size as
// the vtable of klass Object.
int vtable_size = Universe::base_vtable_size();
@@ -160,14 +159,6 @@
}
}
-// GC support
-
-void ArrayKlass::oops_do(OopClosure* cl) {
- Klass::oops_do(cl);
-
- cl->do_oop(adr_component_mirror());
-}
-
// JVM support
jint ArrayKlass::compute_modifier_flags(TRAPS) const {
@@ -182,8 +173,6 @@
void ArrayKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
- // Clear the java mirror
- set_component_mirror(NULL);
}
void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
@@ -217,10 +206,6 @@
void ArrayKlass::verify_on(outputStream* st) {
Klass::verify_on(st);
-
- if (component_mirror() != NULL) {
- guarantee(component_mirror()->klass() != NULL, "should have a class");
- }
}
void ArrayKlass::oop_verify_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -39,7 +39,6 @@
Klass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present).
Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present).
int _vtable_len; // size of vtable for this klass
- oop _component_mirror; // component type, as a java/lang/Class
protected:
// Constructors
@@ -70,13 +69,6 @@
// type of elements (T_OBJECT for both oop arrays and array-arrays)
BasicType element_type() const { return layout_helper_element_type(layout_helper()); }
- oop component_mirror() const { return _component_mirror; }
- void set_component_mirror(oop m) { klass_oop_store(&_component_mirror, m); }
- oop* adr_component_mirror() { return (oop*)&this->_component_mirror;}
-
- // Compiler/Interpreter offset
- static ByteSize component_mirror_offset() { return in_ByteSize(offset_of(ArrayKlass, _component_mirror)); }
-
virtual Klass* java_super() const;//{ return SystemDictionary::Object_klass(); }
// Allocation
@@ -122,9 +114,6 @@
void array_klasses_do(void f(Klass* k));
void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
- // GC support
- virtual void oops_do(OopClosure* cl);
-
// Return a handle.
static void complete_create_array_klass(ArrayKlass* k, KlassHandle super_klass, TRAPS);
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -68,7 +68,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -42,7 +42,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -38,7 +38,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
--- a/hotspot/src/share/vm/oops/klass.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/klass.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -565,7 +565,7 @@
TRACE_DEFINE_KLASS_METHODS;
// garbage collection support
- virtual void oops_do(OopClosure* cl);
+ void oops_do(OopClosure* cl);
// Iff the class loader (or mirror for anonymous classes) is alive the
// Klass is considered alive.
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -51,7 +51,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
--- a/hotspot/src/share/vm/opto/library_call.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -845,7 +845,6 @@
case vmIntrinsics::_isArray:
case vmIntrinsics::_isPrimitive:
case vmIntrinsics::_getSuperclass:
- case vmIntrinsics::_getComponentType:
case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
case vmIntrinsics::_floatToRawIntBits:
@@ -3412,10 +3411,6 @@
prim_return_value = null();
return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
break;
- case vmIntrinsics::_getComponentType:
- prim_return_value = null();
- return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
- break;
case vmIntrinsics::_getClassAccessFlags:
prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
return_type = TypeInt::INT; // not bool! 6297094
@@ -3532,17 +3527,6 @@
}
break;
- case vmIntrinsics::_getComponentType:
- if (generate_array_guard(kls, region) != NULL) {
- // Be sure to pin the oop load to the guard edge just created:
- Node* is_array_ctrl = region->in(region->req()-1);
- Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
- Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
- phi->add_req(cmo);
- }
- query_value = null(); // non-array case is null
- break;
-
case vmIntrinsics::_getClassAccessFlags:
p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
--- a/hotspot/src/share/vm/opto/memnode.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -1799,13 +1799,6 @@
}
const Type* aift = load_array_final_field(tkls, klass);
if (aift != NULL) return aift;
- if (tkls->offset() == in_bytes(ArrayKlass::component_mirror_offset())
- && klass->is_array_klass()) {
- // The field is ArrayKlass::_component_mirror. Return its (constant) value.
- // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
- assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
- return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
- }
if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
// The field is Klass::_java_mirror. Return its (constant) value.
// (Folds up the 2nd indirection in anObjConstant.getClass().)
@@ -2200,18 +2193,15 @@
}
// Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
- // Simplify ak.component_mirror.array_klass to plain ak, ak an ArrayKlass.
// See inline_native_Class_query for occurrences of these patterns.
// Java Example: x.getClass().isAssignableFrom(y)
- // Java Example: Array.newInstance(x.getClass().getComponentType(), n)
//
// This improves reflective code, often making the Class
// mirror go completely dead. (Current exception: Class
// mirrors may appear in debug info, but we could clean them out by
// introducing a new debug info operator for Klass*.java_mirror).
if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
- && (offset == java_lang_Class::klass_offset_in_bytes() ||
- offset == java_lang_Class::array_klass_offset_in_bytes())) {
+ && offset == java_lang_Class::klass_offset_in_bytes()) {
// We are loading a special hidden field from a Class mirror,
// the field which points to its Klass or ArrayKlass metaobject.
if (base->is_Load()) {
@@ -2223,9 +2213,6 @@
&& adr2->is_AddP()
) {
int mirror_field = in_bytes(Klass::java_mirror_offset());
- if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
- mirror_field = in_bytes(ArrayKlass::component_mirror_offset());
- }
if (tkls->offset() == mirror_field) {
return adr2->in(AddPNode::Base);
}
--- a/hotspot/src/share/vm/prims/jvm.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -1403,14 +1403,6 @@
JVM_END
-JVM_ENTRY(jclass, JVM_GetComponentType(JNIEnv *env, jclass cls))
- JVMWrapper("JVM_GetComponentType");
- oop mirror = JNIHandles::resolve_non_null(cls);
- oop result = Reflection::array_component_type(mirror, CHECK_NULL);
- return (jclass) JNIHandles::make_local(env, result);
-JVM_END
-
-
JVM_ENTRY(jint, JVM_GetClassModifiers(JNIEnv *env, jclass cls))
JVMWrapper("JVM_GetClassModifiers");
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
--- a/hotspot/src/share/vm/prims/jvm.h Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/prims/jvm.h Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -483,9 +483,6 @@
JNIEXPORT jboolean JNICALL
JVM_IsPrimitiveClass(JNIEnv *env, jclass cls);
-JNIEXPORT jclass JNICALL
-JVM_GetComponentType(JNIEnv *env, jclass cls);
-
JNIEXPORT jint JNICALL
JVM_GetClassModifiers(JNIEnv *env, jclass cls);
--- a/hotspot/src/share/vm/runtime/java.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -430,6 +430,8 @@
}
}
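+// Nonzero once VM shutdown has begun; set below with release semantics.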
+jint volatile vm_getting_terminated = 0;
+
// Note: before_exit() can be executed only once. If more than one thread
// tries to shut down the VM at the same time, only one thread
// can run before_exit() and all other threads must wait.
@@ -460,6 +462,8 @@
}
}
+ OrderAccess::release_store(&vm_getting_terminated, 1);
+
// The only difference between this and Win32's _onexit procs is that
// this version is invoked before any threads get killed.
ExitProc* current = exit_procs;
--- a/hotspot/src/share/vm/runtime/reflection.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/runtime/reflection.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -390,7 +390,7 @@
return NULL;
}
- oop result = ArrayKlass::cast(klass)->component_mirror();
+ oop result = java_lang_Class::component_mirror(mirror);
#ifdef ASSERT
oop result2 = NULL;
if (ArrayKlass::cast(klass)->dimension() == 1) {
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -274,7 +274,6 @@
volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*) \
volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*) \
nonstatic_field(ArrayKlass, _vtable_len, int) \
- nonstatic_field(ArrayKlass, _component_mirror, oop) \
nonstatic_field(CompiledICHolder, _holder_method, Method*) \
nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \
nonstatic_field(ConstantPool, _tags, Array<u1>*) \
--- a/hotspot/src/share/vm/services/mallocTracker.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -171,8 +171,9 @@
// Total malloc'd memory used by arenas
size_t total_arena() const;
- inline size_t thread_count() {
- return by_type(mtThreadStack)->malloc_count();
+ inline size_t thread_count() const {
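+ // by_type() is not const, so cast away constness; this query does not modify the snapshot.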
+ MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
+ return s->by_type(mtThreadStack)->malloc_count();
}
void reset();
--- a/hotspot/src/share/vm/services/memBaseline.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/services/memBaseline.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -70,15 +70,13 @@
*/
class MallocAllocationSiteWalker : public MallocSiteWalker {
private:
- SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
- _malloc_sites;
+ SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites;
size_t _count;
// Entries in MallocSiteTable with size = 0 and count = 0,
// when the malloc site is no longer there.
public:
- MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
- }
+ MallocAllocationSiteWalker() : _count(0) { }
inline size_t count() const { return _count; }
@@ -109,13 +107,12 @@
// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
private:
- SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
+ SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
_virtual_memory_regions;
size_t _count;
public:
- VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
- }
+ VirtualMemoryAllocationWalker() : _count(0) { }
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
@@ -136,39 +133,30 @@
bool MemBaseline::baseline_summary() {
- assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
- assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
-
- _malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
- _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
- if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
- return false;
- }
- MallocMemorySummary::snapshot(_malloc_memory_snapshot);
- VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
+ MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
+ VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
return true;
}
bool MemBaseline::baseline_allocation_sites() {
- assert(arena() != NULL, "Just check");
// Malloc allocation sites
- MallocAllocationSiteWalker malloc_walker(arena());
+ MallocAllocationSiteWalker malloc_walker;
if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
return false;
}
- _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
+ _malloc_sites.move(malloc_walker.malloc_sites());
// The malloc sites are collected in size order
_malloc_sites_order = by_size;
// Virtual memory allocation sites
- VirtualMemoryAllocationWalker virtual_memory_walker(arena());
+ VirtualMemoryAllocationWalker virtual_memory_walker;
if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
return false;
}
// Virtual memory allocations are collected in call stack order
- _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
+ _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
if (!aggregate_virtual_memory_allocation_sites()) {
return false;
@@ -180,11 +168,6 @@
}
bool MemBaseline::baseline(bool summaryOnly) {
- if (arena() == NULL) {
- _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
- if (arena() == NULL) return false;
- }
-
reset();
_class_count = InstanceKlass::number_of_instance_classes();
@@ -211,8 +194,7 @@
}
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
- allocation_sites(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
@@ -230,12 +212,12 @@
site->commit_memory(rgn->committed_size());
}
- _virtual_memory_sites.set_head(allocation_sites.head());
+ _virtual_memory_sites.move(&allocation_sites);
return true;
}
MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
- assert(!_malloc_sites.is_empty(), "Detail baseline?");
+ assert(!_malloc_sites.is_empty(), "Not a detail baseline");
switch(order) {
case by_size:
malloc_sites_to_size_order();
@@ -251,7 +233,7 @@
}
VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
- assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
+ assert(!_virtual_memory_sites.is_empty(), "Not a detail baseline");
switch(order) {
case by_size:
virtual_memory_sites_to_size_order();
@@ -270,8 +252,7 @@
// Sorting allocations sites in different orders
void MemBaseline::malloc_sites_to_size_order() {
if (_malloc_sites_order != by_size) {
- SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<MallocSite, compare_malloc_size> tmp;
// Add malloc sites to sorted linked list to sort into size order
tmp.move(&_malloc_sites);
@@ -283,8 +264,7 @@
void MemBaseline::malloc_sites_to_allocation_site_order() {
if (_malloc_sites_order != by_site) {
- SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<MallocSite, compare_malloc_site> tmp;
// Add malloc sites to sorted linked list to sort into site (address) order
tmp.move(&_malloc_sites);
_malloc_sites.set_head(tmp.head());
@@ -295,8 +275,7 @@
void MemBaseline::virtual_memory_sites_to_size_order() {
if (_virtual_memory_sites_order != by_size) {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp;
tmp.move(&_virtual_memory_sites);
@@ -308,10 +287,9 @@
void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
if (_virtual_memory_sites_order != by_size) {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp;
- tmp.add(&_virtual_memory_sites);
+ tmp.move(&_virtual_memory_sites);
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
--- a/hotspot/src/share/vm/services/memBaseline.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/services/memBaseline.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -61,28 +61,22 @@
};
private:
- // All baseline data is stored in this arena
- Arena* _arena;
-
// Summary information
- MallocMemorySnapshot* _malloc_memory_snapshot;
- VirtualMemorySnapshot* _virtual_memory_snapshot;
+ MallocMemorySnapshot _malloc_memory_snapshot;
+ VirtualMemorySnapshot _virtual_memory_snapshot;
size_t _class_count;
// Allocation sites information
// Malloc allocation sites
- LinkedListImpl<MallocSite, ResourceObj::ARENA>
- _malloc_sites;
+ LinkedListImpl<MallocSite> _malloc_sites;
// All virtual memory allocations
- LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
- _virtual_memory_allocations;
+ LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
// Virtual memory allocations by allocation sites, always in by_address
// order
- LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
- _virtual_memory_sites;
+ LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites;
SortingOrder _malloc_sites_order;
SortingOrder _virtual_memory_sites_order;
@@ -93,30 +87,23 @@
// create a memory baseline
MemBaseline():
_baseline_type(Not_baselined),
- _class_count(0),
- _arena(NULL),
- _malloc_memory_snapshot(NULL),
- _virtual_memory_snapshot(NULL),
- _malloc_sites(NULL) {
+ _class_count(0) {
}
~MemBaseline() {
reset();
- if (_arena != NULL) {
- delete _arena;
- }
}
bool baseline(bool summaryOnly = true);
BaselineType baseline_type() const { return _baseline_type; }
- MallocMemorySnapshot* malloc_memory_snapshot() const {
- return _malloc_memory_snapshot;
+ MallocMemorySnapshot* malloc_memory_snapshot() {
+ return &_malloc_memory_snapshot;
}
- VirtualMemorySnapshot* virtual_memory_snapshot() const {
- return _virtual_memory_snapshot;
+ VirtualMemorySnapshot* virtual_memory_snapshot() {
+ return &_virtual_memory_snapshot;
}
MallocSiteIterator malloc_sites(SortingOrder order);
@@ -133,10 +120,8 @@
// memory
size_t total_reserved_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
- assert(_malloc_memory_snapshot != NULL, "No malloc memory snapshot");
- size_t amount = _malloc_memory_snapshot->total() +
- _virtual_memory_snapshot->total_reserved();
+ size_t amount = _malloc_memory_snapshot.total() +
+ _virtual_memory_snapshot.total_reserved();
return amount;
}
@@ -144,32 +129,30 @@
// virtual memory
size_t total_committed_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_virtual_memory_snapshot != NULL,
- "Not a snapshot");
- size_t amount = _malloc_memory_snapshot->total() +
- _virtual_memory_snapshot->total_committed();
+ size_t amount = _malloc_memory_snapshot.total() +
+ _virtual_memory_snapshot.total_committed();
return amount;
}
size_t total_arena_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
- return _malloc_memory_snapshot->total_arena();
+ return _malloc_memory_snapshot.total_arena();
}
size_t malloc_tracking_overhead() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- return _malloc_memory_snapshot->malloc_overhead()->size();
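+ // malloc_overhead() is not const; cast away constness for this read-only query.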
+ MemBaseline* bl = const_cast<MemBaseline*>(this);
+ return bl->_malloc_memory_snapshot.malloc_overhead()->size();
}
- const MallocMemory* malloc_memory(MEMFLAGS flag) const {
- assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
- return _malloc_memory_snapshot->by_type(flag);
+ MallocMemory* malloc_memory(MEMFLAGS flag) {
+ assert(baseline_type() != Not_baselined, "Not yet baselined");
+ return _malloc_memory_snapshot.by_type(flag);
}
- const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
- assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
- return _virtual_memory_snapshot->by_type(flag);
+ VirtualMemory* virtual_memory(MEMFLAGS flag) {
+ assert(baseline_type() != Not_baselined, "Not yet baselined");
+ return _virtual_memory_snapshot.by_type(flag);
}
@@ -180,24 +163,19 @@
size_t thread_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_malloc_memory_snapshot != NULL, "Baselined?");
- return _malloc_memory_snapshot->thread_count();
+ return _malloc_memory_snapshot.thread_count();
}
// reset the baseline for reuse
void reset() {
_baseline_type = Not_baselined;
- _malloc_memory_snapshot = NULL;
- _virtual_memory_snapshot = NULL;
+ _malloc_memory_snapshot.reset();
+ _virtual_memory_snapshot.reset();
_class_count = 0;
- _malloc_sites = NULL;
- _virtual_memory_sites = NULL;
- _virtual_memory_allocations = NULL;
-
- if (_arena != NULL) {
- _arena->destruct_contents();
- }
+ _malloc_sites.clear();
+ _virtual_memory_sites.clear();
+ _virtual_memory_allocations.clear();
}
private:
@@ -210,8 +188,6 @@
// Aggregate virtual memory allocation by allocation sites
bool aggregate_virtual_memory_allocation_sites();
- Arena* arena() { return _arena; }
-
// Sorting allocation sites in different orders
// Sort allocation sites in size order
void malloc_sites_to_size_order();
--- a/hotspot/src/share/vm/utilities/hashtable.cpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp Tue Sep 09 16:14:40 2014 +0200
@@ -37,21 +37,22 @@
#include "utilities/numberSeq.hpp"
-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-// - HashtableEntrys are allocated in blocks to reduce the space overhead.
+// This hashtable is implemented as an open hash table with a fixed number of buckets.
-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
- BasicHashtableEntry<F>* entry;
-
- if (_free_list) {
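+// Return an entry taken from the free list, or NULL if the free list is empty.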
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
+ BasicHashtableEntry<F>* entry = NULL;
+ if (_free_list != NULL) {
entry = _free_list;
_free_list = _free_list->next();
- } else {
+ }
+ return entry;
+}
+
+// HashtableEntry objects are allocated in blocks to reduce the space overhead.
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+ BasicHashtableEntry<F>* entry = new_entry_free_list();
+
+ if (entry == NULL) {
if (_first_free_entry + _entry_size >= _end_block) {
int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
int len = _entry_size * block_size;
@@ -84,9 +85,9 @@
// This is somewhat an arbitrary heuristic but if one bucket gets to
// rehash_count which is currently 100, there's probably something wrong.
-template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
- assert(table_size() != 0, "underflow");
- if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
+template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
+ assert(this->table_size() != 0, "underflow");
+ if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
// Set a flag for the next safepoint, which should be at some guaranteed
// safepoint interval.
return true;
@@ -94,13 +95,13 @@
return false;
}
-template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
// Create a new table and using alternate hash code, populate the new table
// with the existing elements. This can be used to change the hash code
// and could in the future change the size of the table.
-template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
// Initialize the global seed for hashing.
_seed = AltHashing::compute_seed();
@@ -110,7 +111,7 @@
// Iterate through the table and create a new entry for the new table
for (int i = 0; i < new_table->table_size(); ++i) {
- for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
+ for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
HashtableEntry<T, F>* next = p->next();
T string = p->literal();
// Use alternate hashing algorithm on the symbol in the first table
@@ -239,11 +240,11 @@
}
}
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
return symbol->size() * HeapWordSize;
}
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
// NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
// and the String.value array is shared by several Strings. However, starting from JDK8,
// the String.value array is not shared anymore.
@@ -256,12 +257,12 @@
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
-template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
NumberSeq summary;
int literal_bytes = 0;
for (int i = 0; i < this->table_size(); ++i) {
int count = 0;
- for (HashtableEntry<T, F>* e = bucket(i);
+ for (HashtableEntry<T, F>* e = this->bucket(i);
e != NULL; e = e->next()) {
count++;
literal_bytes += literal_size(e->literal());
@@ -271,7 +272,7 @@
double num_buckets = summary.num();
double num_entries = summary.sum();
- int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
+ int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
@@ -354,12 +355,20 @@
// Explicitly instantiate these types
+#if INCLUDE_ALL_GCS
+template class Hashtable<nmethod*, mtGC>;
+template class HashtableEntry<nmethod*, mtGC>;
+template class BasicHashtable<mtGC>;
+#endif
template class Hashtable<ConstantPool*, mtClass>;
+template class RehashableHashtable<Symbol*, mtSymbol>;
+template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
+template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
--- a/hotspot/src/share/vm/utilities/hashtable.hpp Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp Tue Sep 09 16:14:40 2014 +0200
@@ -178,11 +178,6 @@
void verify_lookup_length(double load);
#endif
- enum {
- rehash_count = 100,
- rehash_multiple = 60
- };
-
void initialize(int table_size, int entry_size, int number_of_entries);
// Accessor
@@ -194,12 +189,12 @@
// The following method is not MT-safe and must be done under lock.
BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
+ // Attempt to get an entry from the free list
+ BasicHashtableEntry<F>* new_entry_free_list();
+
// Table entry management
BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
- // Check that the table is unbalanced
- bool check_rehash_table(int count);
-
// Used when moving the entry to another table
// Clean up links, but do not add to free_list
void unlink_entry(BasicHashtableEntry<F>* entry) {
@@ -277,8 +272,30 @@
return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
}
+};
+
+template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
+ protected:
+
+ enum {
+ rehash_count = 100,
+ rehash_multiple = 60
+ };
+
+ // Check that the table is unbalanced
+ bool check_rehash_table(int count);
+
+ public:
+ RehashableHashtable(int table_size, int entry_size)
+ : Hashtable<T, F>(table_size, entry_size) { }
+
+ RehashableHashtable(int table_size, int entry_size,
+ HashtableBucket<F>* buckets, int number_of_entries)
+ : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
+
+
// Function to move these elements into the new table.
- void move_to(Hashtable<T, F>* new_table);
+ void move_to(RehashableHashtable<T, F>* new_table);
static bool use_alternate_hashcode() { return _seed != 0; }
static juint seed() { return _seed; }
@@ -292,7 +309,6 @@
static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
static int literal_size(Klass *k) {Unimplemented(); return 0;}
-public:
void dump_table(outputStream* st, const char *table_name);
private:
--- a/hotspot/test/gc/g1/TestHumongousShrinkHeap.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/gc/g1/TestHumongousShrinkHeap.java Tue Sep 09 16:14:40 2014 +0200
@@ -22,9 +22,8 @@
*/
/**
- * @ignore 8041506, 8041946, 8042051
* @test TestHumongousShrinkHeap
- * @bug 8036025
+ * @bug 8036025 8056043
* @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
* @library /testlibrary
* @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc TestHumongousShrinkHeap
--- a/hotspot/test/gc/g1/TestStringDeduplicationTools.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/gc/g1/TestStringDeduplicationTools.java Tue Sep 09 16:14:40 2014 +0200
@@ -129,8 +129,19 @@
return list;
}
+ /**
+ * Verifies that the given list contains the expected number of unique strings.
+ * Deduplication may not have completed yet, so the method retries the check
+ * several times with a short pause between attempts.
+ *
+ * @param list strings to check
+ * @param uniqueExpected expected number of unique strings
+ * @throws RuntimeException if verification fails
+ */
private static void verifyStrings(ArrayList<String> list, int uniqueExpected) {
- for (;;) {
+ boolean passed = false;
+ for (int attempts = 0; attempts < 10; attempts++) {
// Check number of deduplicated strings
ArrayList<Object> unique = new ArrayList<Object>(uniqueExpected);
for (String string: list) {
@@ -153,11 +164,11 @@
", uniqueExpected=" + uniqueExpected);
if (unique.size() == uniqueExpected) {
- System.out.println("Deduplication completed");
+ System.out.println("Deduplication completed (as fast as " + attempts + " iterations)");
+ passed = true;
break;
} else {
System.out.println("Deduplication not completed, waiting...");
-
// Give the deduplication thread time to complete
try {
Thread.sleep(1000);
@@ -166,6 +177,9 @@
}
}
}
+ if (!passed) {
+ throw new RuntimeException("String verification failed");
+ }
}
private static OutputAnalyzer runTest(String... extraArgs) throws Exception {
@@ -247,14 +261,20 @@
forceDeduplication(ageThreshold, FullGC);
// Wait for deduplication to occur
- while (getValue(dupString1) != getValue(baseString)) {
+ for (int attempts = 0; attempts < 10; attempts++) {
+ if (getValue(dupString1) == getValue(baseString)) {
+ break;
+ }
System.out.println("Waiting...");
try {
- Thread.sleep(100);
+ Thread.sleep(1000);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
+ if (getValue(dupString1) != getValue(baseString)) {
+ throw new RuntimeException("Deduplication has not occurred");
+ }
// Create a new duplicate of baseString
StringBuilder sb2 = new StringBuilder(baseString);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/whitebox/TestWBGC.java Tue Sep 09 16:14:40 2014 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestWBGC
+ * @bug 8055098
+ * @summary Verify that the WhiteBox methods isObjectInOldGen and youngGC work correctly.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestWBGC
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestWBGC
+ */
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class TestWBGC {
+
+ public static void main(String args[]) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ true,
+ "-Xbootclasspath/a:.",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-XX:MaxTenuringThreshold=1",
+ "-XX:+PrintGC",
+ GCYoungTest.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ System.out.println(output.getStdout());
+ output.shouldHaveExitValue(0);
+ output.shouldContain("WhiteBox Initiated Young GC");
+ output.shouldNotContain("Full");
+ // To be sure that we don't provoke Full GC additionaly to young
+ }
+
+ public static class GCYoungTest {
+ static WhiteBox wb = WhiteBox.getWhiteBox();
+ public static Object obj;
+
+ public static void main(String args[]) {
+ obj = new Object();
+ Asserts.assertFalse(wb.isObjectInOldGen(obj));
+ // Two young GCs are needed to promote the object into the old generation
+ wb.youngGC();
+ wb.youngGC();
+ Asserts.assertTrue(wb.isObjectInOldGen(obj));
+ }
+ }
+}
--- a/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java Tue Sep 09 16:14:40 2014 +0200
@@ -26,7 +26,6 @@
* @bug 8024927
* @summary Testing address of compressed class pointer space as best as possible.
* @library /testlibrary
- * @ignore 8055164
*/
import com.oracle.java.testlibrary.*;
@@ -89,7 +88,6 @@
"-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("HeapBaseMinAddress must be at least");
- output.shouldContain("HotSpot");
output.shouldHaveExitValue(0);
}
--- a/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java Tue Sep 09 16:14:40 2014 +0200
@@ -26,7 +26,6 @@
* @key nmt
* @summary Empty argument to NMT should result in an informative error message
* @library /testlibrary
- * @ignore 8055051
*/
import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/JcmdDetailDiff.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/JcmdDetailDiff.java Tue Sep 09 16:14:40 2014 +0200
@@ -62,21 +62,18 @@
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTCommitMemory(addr, commitSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTUncommitMemory(addr, commitSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTReleaseMemory(addr, reserveSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
--- a/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Tue Sep 09 16:14:40 2014 +0200
@@ -22,10 +22,9 @@
*/
/*
- * @key stress
* @test
* @summary Test corner case that overflows malloc site hashtable bucket
- * @key nmt jcmd
+ * @key nmt jcmd stress
* @library /testlibrary /testlibrary/whitebox
* @ignore - This test is disabled since it will stress NMT and time out during normal testing
* @build MallocSiteHashOverflow
--- a/hotspot/test/runtime/NMT/MallocStressTest.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/MallocStressTest.java Tue Sep 09 16:14:40 2014 +0200
@@ -22,10 +22,9 @@
*/
/*
- * @key stress
* @test
* @summary Stress test for malloc tracking
- * @key nmt jcmd
+ * @key nmt jcmd stress
* @library /testlibrary /testlibrary/whitebox
* @build MallocStressTest
* @ignore - This test is disabled since it will stress NMT and time out during normal testing
--- a/hotspot/test/runtime/NMT/NMTWithCDS.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/NMTWithCDS.java Tue Sep 09 16:14:40 2014 +0200
@@ -34,14 +34,15 @@
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
- pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+ pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
+ "-XX:+UnlockDiagnosticVMOptions", "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
--- a/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Tue Sep 09 16:14:40 2014 +0200
@@ -26,7 +26,6 @@
* @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly
* @key nmt jcmd
* @library /testlibrary /testlibrary/whitebox
- * @ignore
* @build VirtualAllocCommitUncommitRecommit
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit
@@ -43,8 +42,8 @@
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
- long commitSize = 4 * 1024; // 4KB
- long reserveSize = 1024 * 1024; // 1024KB
+ long commitSize = 128 * 1024; // 128KB
+ long reserveSize = 4 * 1024 * 1024; // 4096KB
long addr;
String pid = Integer.toString(ProcessTools.getProcessId());
@@ -63,11 +62,11 @@
"VM.native_memory", "detail" });
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=0KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
long addrA = addr;
@@ -84,24 +83,24 @@
wb.NMTCommitMemory(addrD, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=512KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit BC
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=8KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=256KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// commit EF
@@ -109,22 +108,22 @@
wb.NMTCommitMemory(addrF, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=512KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit A
wb.NMTUncommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=12KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=384KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// commit ABC
@@ -133,11 +132,11 @@
wb.NMTCommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=24KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=768KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit ABCDEF
@@ -149,11 +148,11 @@
wb.NMTUncommitMemory(addrF, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=0KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// release
@@ -161,6 +160,6 @@
output = new OutputAnalyzer(pb.start());
output.shouldNotContain("Test (reserved=");
output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
- + Long.toHexString(addr + reserveSize) + "\\] reserved");
+ + Long.toHexString(addr + reserveSize) + "\\] reserved 4096KB for Test");
}
}
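
Note: with the new granularity every expected NMT figure is a plain multiple of the 128KB commit size inside the 4096KB reservation: 4 committed blocks are 512KB, 2 are 256KB, 3 are 384KB, 6 are 768KB. The larger sizes likely keep each block distinct on platforms whose default page size exceeds 4KB, where a 4KB commit would be rounded up and the old expected values would no longer match. A tiny sketch that just reproduces the expected strings in test order (class name illustrative):

    public class NmtExpectedFigures {
        public static void main(String[] args) {
            long commitKB  = 128;        // one committed block: 128 * 1024 bytes
            long reserveKB = 4 * 1024;   // whole reservation: 4 * 1024 * 1024 bytes
            // Committed block counts after each step of the test:
            // commit A-D, uncommit BC, commit EF, uncommit A,
            // commit ABC, uncommit all six.
            int[] committedBlocks = {4, 2, 4, 3, 6, 0};
            for (int n : committedBlocks) {
                System.out.println("Test (reserved=" + reserveKB + "KB, committed="
                                   + (n * commitKB) + "KB)");
            }
        }
    }
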
--- a/hotspot/test/runtime/jsig/Test8017498.sh Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/runtime/jsig/Test8017498.sh Tue Sep 09 16:14:40 2014 +0200
@@ -31,15 +31,14 @@
## @bug 8022301
## @bug 8025519
## @summary sigaction(sig) results in process hang/timed-out if sig is much greater than SIGRTMAX
-## @ignore 8041727
## @run shell/timeout=60 Test8017498.sh
##
-if [ "${TESTSRC}" = "" ]
-then
- TESTSRC=${PWD}
+if [ -z "${TESTSRC}" ]; then
+ TESTSRC="${PWD}"
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
+
echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../../test_env.sh
@@ -52,13 +51,13 @@
Linux)
echo "Testing on Linux"
gcc_cmd=`which gcc`
- if [ "x$gcc_cmd" == "x" ]; then
+ if [ -z "$gcc_cmd" ]; then
echo "WARNING: gcc not found. Cannot execute test." 2>&1
exit 0;
fi
MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}${VM_CPU}${FS}libjsig.so
- if [ "$VM_BITS" == "32" ] && [ "$VM_CPU" != "arm" ] && [ "$VM_CPU" != "ppc" ]; then
- EXTRA_CFLAG=-m32
+ if [ "$VM_BITS" = "32" ] && [ "$VM_CPU" != "arm" ] && [ "$VM_CPU" != "ppc" ]; then
+ EXTRA_CFLAG=-m32
fi
echo MY_LD_PRELOAD = ${MY_LD_PRELOAD}
;;
@@ -70,7 +69,7 @@
THIS_DIR=.
-cp ${TESTSRC}${FS}*.java ${THIS_DIR}
+cp "${TESTSRC}${FS}"*.java "${THIS_DIR}"
${COMPILEJAVA}${FS}bin${FS}javac *.java
$gcc_cmd -DLINUX -fPIC -shared \
@@ -80,16 +79,19 @@
-I${COMPILEJAVA}${FS}include${FS}linux \
${TESTSRC}${FS}TestJNI.c
+if [ $? -ne 0 ] ; then
+ echo "Compile failed, Ignoring failed compilation and forcing the test to pass"
+ exit 0
+fi
+
# run the java test in the background
cmd="LD_PRELOAD=$MY_LD_PRELOAD \
${TESTJAVA}${FS}bin${FS}java \
-Djava.library.path=. -server TestJNI 100"
-echo "$cmd > test.out 2>&1"
-eval $cmd > test.out 2>&1
+echo "$cmd > test.out"
+eval $cmd > test.out
-grep "old handler" test.out > ${NULL}
-if [ $? = 0 ]
-then
+if grep "old handler" test.out > ${NULL}; then
echo "Test Passed"
exit 0
fi
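
Note: the script's core is to preload libjsig.so into a child JVM via LD_PRELOAD, run TestJNI, and pass if the output contains "old handler" (the message printed when the chained handler is found). For comparison, the same launch expressed with a plain ProcessBuilder; this is a sketch only, and the libjsig.so arch segment is illustrative (the script derives it from $TESTJAVA/jre/lib/$VM_CPU/libjsig.so):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class JsigLaunchSketch {
        public static void main(String[] args)
                throws IOException, InterruptedException {
            String javaHome = System.getProperty("java.home");
            String jsig = javaHome + "/lib/amd64/libjsig.so"; // illustrative path

            ProcessBuilder pb = new ProcessBuilder(
                javaHome + "/bin/java",
                "-Djava.library.path=.", "-server", "TestJNI", "100");
            pb.environment().put("LD_PRELOAD", jsig);          // same as the eval'd cmd
            pb.redirectOutput(Paths.get("test.out").toFile()); // cmd > test.out
            pb.start().waitFor();

            // Equivalent of: if grep "old handler" test.out; then echo "Test Passed"
            boolean passed = Files.readAllLines(Paths.get("test.out"))
                                  .stream()
                                  .anyMatch(line -> line.contains("old handler"));
            System.out.println(passed ? "Test Passed" : "Test Failed");
        }
    }
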
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/Platform.java Mon Sep 08 16:05:48 2014 +0200
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/Platform.java Tue Sep 09 16:14:40 2014 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
private static final String osName = System.getProperty("os.name");
private static final String dataModel = System.getProperty("sun.arch.data.model");
private static final String vmVersion = System.getProperty("java.vm.version");
+ private static final String javaVersion = System.getProperty("java.version");
private static final String osArch = System.getProperty("os.arch");
private static final String vmName = System.getProperty("java.vm.name");
@@ -83,7 +84,8 @@
}
public static boolean isDebugBuild() {
- return vmVersion.toLowerCase().contains("debug");
+ return (vmVersion.toLowerCase().contains("debug") ||
+ javaVersion.toLowerCase().contains("debug"));
}
public static String getVMVersion() {
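
Note: some debug builds carry the "debug" marker only in java.version rather than java.vm.version, so checking vmVersion alone misclassifies them as product builds; isDebugBuild() now consults both properties. Typical use in a test, as a sketch (the scaling factor is illustrative, not taken from any existing test):

    import com.oracle.java.testlibrary.Platform;

    public class DebugBuildGuardSketch {
        public static void main(String[] args) {
            // Debug VMs run much slower than product builds; scale work down.
            int iterations = Platform.isDebugBuild() ? 1_000 : 100_000;
            long sum = 0;
            for (int i = 0; i < iterations; i++) {
                sum += i;
            }
            System.out.println("isDebugBuild=" + Platform.isDebugBuild()
                               + ", iterations=" + iterations);
        }
    }
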