--- a/hotspot/agent/make/saenv.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/make/saenv.sh Fri Aug 31 16:17:40 2012 -0700
@@ -26,7 +26,7 @@
# This file sets common environment variables for all SA scripts
OS=`uname`
-STARTDIR=`dirname $0`
+STARTDIR=`(cd \`dirname $0 \`; pwd)`
ARCH=`uname -m`
if [ "x$SA_JAVA" = "x" ]; then
--- a/hotspot/agent/make/start-debug-server-proc.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/make/start-debug-server-proc.sh Fri Aug 31 16:17:40 2012 -0700
@@ -25,10 +25,11 @@
. `dirname $0`/saenv.sh
-if [ -f $STARTDIR/sa.jar ] ; then
- CP=$STARTDIR/sa.jar
+if [ -f $STARTDIR/../lib/sa-jdi.jar ] ; then
+ CP=$STARTDIR/../lib/sa-jdi.jar
else
CP=$STARTDIR/../build/classes
fi
-$SA_JAVA -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file:/$CP -Djava.security.policy=$STARTDIR\/grantAll.policy sun.jvm.hotspot.DebugServer $*
+$STARTDIR/java -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file://$CP -Djava.security.policy=${STARTDIR}/grantAll.policy sun.jvm.hotspot.DebugServer $*
+
--- a/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,11 +55,11 @@
#define THROW_NEW_DEBUGGER_EXCEPTION_(str, value) { throw_new_debugger_exception(env, str); return value; }
#define THROW_NEW_DEBUGGER_EXCEPTION(str) { throw_new_debugger_exception(env, str); return;}
-static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
+void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
(*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
}
-static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
+struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
jlong ptr = (*env)->GetLongField(env, this_obj, p_ps_prochandle_ID);
return (struct ps_prochandle*)(intptr_t)ptr;
}
@@ -280,6 +280,7 @@
return (err == PS_OK)? array : 0;
}
+#if defined(i386) || defined(ia64) || defined(amd64) || defined(sparc) || defined(sparcv9)
JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
(JNIEnv *env, jobject this_obj, jint lwp_id) {
@@ -410,3 +411,4 @@
(*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT);
return array;
}
+#endif
--- a/hotspot/agent/src/os/linux/libproc.h Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/os/linux/libproc.h Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,15 @@
#ifndef _LIBPROC_H_
#define _LIBPROC_H_
+#include <jni.h>
#include <unistd.h>
#include <stdint.h>
#include "proc_service.h"
+#if defined(arm) || defined(ppc)
+#include "libproc_md.h"
+#endif
+
#if defined(sparc) || defined(sparcv9)
/*
If _LP64 is defined ptrace.h should be taken from /usr/include/asm-sparc64
@@ -139,4 +144,8 @@
// address->nearest symbol lookup. return NULL for no symbol
const char* symbol_for_pc(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* poffset);
+struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj);
+
+void throw_new_debugger_exception(JNIEnv* env, const char* errMsg);
+
#endif //__LIBPROC_H_
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -549,7 +549,13 @@
machDesc = new MachineDescriptionSPARC32Bit();
}
} else {
- throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
+ try {
+ machDesc = (MachineDescription)
+ Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
+ cpu.toUpperCase()).newInstance();
+ } catch (Exception e) {
+ throw new DebuggerException("Linux not supported on machine type " + cpu);
+ }
}
LinuxDebuggerLocal dbg =
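
The hunk above replaces the hard "Linux only supported on ..." failure with a reflective lookup of a MachineDescription subclass named after the CPU. A minimal standalone sketch of that lookup pattern, assuming hypothetical usage outside the SA (the demo class name and the fallback CPU value are illustrative, not part of this changeset):

    public class CpuClassLookupDemo {
        // Mirrors the reflective lookup in the hunk above: the class name is
        // "sun.jvm.hotspot.debugger.MachineDescription" + CPU in upper case,
        // and any failure collapses into a single "not supported" error.
        static Object machineDescriptionFor(String cpu) {
            String name = "sun.jvm.hotspot.debugger.MachineDescription" + cpu.toUpperCase();
            try {
                return Class.forName(name).newInstance();
            } catch (Exception e) {
                throw new RuntimeException("Linux not supported on machine type " + cpu, e);
            }
        }

        public static void main(String[] args) {
            String cpu = (args.length > 0) ? args[0] : "ppc";
            try {
                System.out.println(machineDescriptionFor(cpu));
            } catch (RuntimeException e) {
                // Expected when sa-jdi.jar is not on the classpath.
                System.out.println(e.getMessage());
            }
        }
    }
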
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpotAgent.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpotAgent.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -737,9 +737,16 @@
machDesc = new MachineDescriptionSPARC32Bit();
}
} else {
- throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
+ try {
+ machDesc = (MachineDescription)
+ Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
+ cpu.toUpperCase()).newInstance();
+ } catch (Exception e) {
+ throw new DebuggerException("unsupported machine type");
+ }
}
+
// Note we do not use a cache for the local debugger in server
// mode; it will be taken care of on the client side (once remote
// debugging is implemented).
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/ThreadContext.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/ThreadContext.java Fri Aug 31 16:17:40 2012 -0700
@@ -24,6 +24,8 @@
package sun.jvm.hotspot.debugger;
+import sun.jvm.hotspot.debugger.cdbg.*;
+
/** This is a placeholder interface for a thread's context, containing
only integer registers (no floating-point ones). What it contains
is platform-dependent. Not all registers are guaranteed to be
@@ -54,4 +56,6 @@
/** Set the value of the specified register (0..getNumRegisters() -
1) as an Address */
public void setRegisterAsAddress(int index, Address value);
+
+ public CFrame getTopFrame(Debugger dbg);
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/amd64/AMD64ThreadContext.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/amd64/AMD64ThreadContext.java Fri Aug 31 16:17:40 2012 -0700
@@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.amd64;
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on amd64 platforms; only a sub-portion
* of the context is guaranteed to be present on all operating
@@ -98,6 +99,10 @@
return data[index];
}
+ public CFrame getTopFrame(Debugger dbg) {
+ return null;
+ }
+
/** This can't be implemented in this class since we would have to
* tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/ia64/IA64ThreadContext.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/ia64/IA64ThreadContext.java Fri Aug 31 16:17:40 2012 -0700
@@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.ia64;
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on ia64 platform; only a sub-portion
of the context is guaranteed to be present on all operating
@@ -172,6 +173,10 @@
return data[index];
}
+ public CFrame getTopFrame(Debugger dbg) {
+ return null;
+ }
+
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,9 @@
if (pc == null) return null;
return new LinuxSPARCCFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize());
} else {
- throw new DebuggerException(cpu + " is not yet supported");
+ // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu
+ ThreadContext context = (ThreadContext) thread.getContext();
+ return context.getTopFrame(dbg);
}
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxThreadContextFactory.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxThreadContextFactory.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
package sun.jvm.hotspot.debugger.linux;
+import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.linux.amd64.*;
import sun.jvm.hotspot.debugger.linux.ia64.*;
@@ -41,8 +42,16 @@
return new LinuxIA64ThreadContext(dbg);
} else if (cpu.equals("sparc")) {
return new LinuxSPARCThreadContext(dbg);
- } else {
- throw new RuntimeException("cpu " + cpu + " is not yet supported");
+ } else {
+ try {
+ Class tcc = Class.forName("sun.jvm.hotspot.debugger.linux." +
+ cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
+ "ThreadContext");
+ Constructor[] ctcc = tcc.getConstructors();
+ return (ThreadContext)ctcc[0].newInstance(dbg);
+ } catch (Exception e) {
+ throw new RuntimeException("cpu " + cpu + " is not yet supported");
+ }
}
}
}
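
Unlike the MachineDescription case, the ThreadContext classes take the debugger as a constructor argument, so the hunk above goes through getConstructors() instead of Class.newInstance(). A sketch of that variant under the assumption of a single public constructor; the stand-in class used in main is only an example and has nothing to do with the SA:

    import java.lang.reflect.Constructor;

    public class CtorLookupDemo {
        // Same shape as the ThreadContext lookup above: resolve the class by
        // name, then hand one argument to its first public constructor. This
        // assumes the class has exactly one public constructor.
        static Object newWithArg(String className, Object arg) {
            try {
                Class<?> c = Class.forName(className);
                Constructor<?>[] ctors = c.getConstructors();
                return ctors[0].newInstance(arg);
            } catch (Exception e) {
                throw new RuntimeException(className + " is not yet supported", e);
            }
        }

        public static void main(String[] args) {
            // java.io.StringReader(String) stands in for LinuxXXXThreadContext(dbg);
            // it is picked only because it has a single public constructor.
            System.out.println(newWithArg("java.io.StringReader", "demo"));
        }
    }
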
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
import java.io.*;
import java.net.*;
import java.util.*;
+import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.proc.amd64.*;
@@ -86,7 +87,16 @@
pcRegIndex = AMD64ThreadContext.RIP;
fpRegIndex = AMD64ThreadContext.RBP;
} else {
+ try {
+ Class tfc = Class.forName("sun.jvm.hotspot.debugger.proc." +
+ cpu.toLowerCase() + ".Proc" + cpu.toUpperCase() +
+ "ThreadFactory");
+ Constructor[] ctfc = tfc.getConstructors();
+ threadFactory = (ProcThreadFactory)ctfc[0].newInstance(this);
+ } catch (Exception e) {
throw new RuntimeException("Thread access for CPU architecture " + PlatformInfo.getCPU() + " not yet supported");
+ // Note: pcRegIndex and fpRegIndex do not appear to be referenced
+ }
}
if (useCache) {
// Cache portion of the remote process's address space.
@@ -375,7 +385,11 @@
int pagesize = getPageSize0();
if (pagesize == -1) {
// return the hard coded default value.
- pagesize = (PlatformInfo.getCPU().equals("x86"))? 4096 : 8192;
+ if (PlatformInfo.getCPU().equals("sparc") ||
+ PlatformInfo.getCPU().equals("amd64") )
+         pagesize = 8192;
+ else
+ pagesize = 4096;
}
return pagesize;
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
import java.rmi.*;
import java.util.*;
+import java.lang.reflect.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
@@ -70,7 +71,18 @@
cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
unalignedAccessesOkay = true;
} else {
- throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
+ try {
+ Class tf = Class.forName("sun.jvm.hotspot.debugger.remote." +
+ cpu.toLowerCase() + ".Remote" + cpu.toUpperCase() +
+ "ThreadFactory");
+ Constructor[] ctf = tf.getConstructors();
+ threadFactory = (RemoteThreadFactory)ctf[0].newInstance(this);
+ } catch (Exception e) {
+ throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
+ }
+ cachePageSize = 4096;
+ cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
+ unalignedAccessesOkay = false;
}
// Cache portion of the remote process's address space.
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sparc/SPARCThreadContext.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sparc/SPARCThreadContext.java Fri Aug 31 16:17:40 2012 -0700
@@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.sparc;
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
/** Currently provides just the minimal information necessary to get
stack traces working. FIXME: currently hardwired for v9 -- will
@@ -124,6 +125,10 @@
return data[index];
}
+ public CFrame getTopFrame(Debugger dbg) {
+ return null;
+ }
+
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/x86/X86ThreadContext.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/x86/X86ThreadContext.java Fri Aug 31 16:17:40 2012 -0700
@@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.x86;
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
/** Specifies the thread context on x86 platforms; only a sub-portion
of the context is guaranteed to be present on all operating
@@ -109,6 +110,10 @@
return data[index];
}
+ public CFrame getTopFrame(Debugger dbg) {
+ return null;
+ }
+
/** This can't be implemented in this class since we would have to
tie the implementation to, for example, the debugging system */
public abstract void setRegisterAsAddress(int index, Address value);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,6 +91,16 @@
access = new LinuxAMD64JavaThreadPDAccess();
} else if (cpu.equals("sparc")) {
access = new LinuxSPARCJavaThreadPDAccess();
+ } else {
+ try {
+ access = (JavaThreadPDAccess)
+ Class.forName("sun.jvm.hotspot.runtime.linux_" +
+ cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
+ "JavaThreadPDAccess").newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException("OS/CPU combination " + os + "/" + cpu +
+ " not yet supported");
+ }
}
} else if (os.equals("bsd")) {
if (cpu.equals("x86")) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Aug 31 16:17:40 2012 -0700
@@ -92,6 +92,8 @@
private boolean usingServerCompiler;
/** Flag indicating whether UseTLAB is turned on */
private boolean useTLAB;
+ /** Flag indicating whether invokedynamic support is on */
+ private boolean enableInvokeDynamic;
/** alignment constants */
private boolean isLP64;
private int bytesPerLong;
@@ -317,6 +319,7 @@
}
useTLAB = (db.lookupIntConstant("UseTLAB").intValue() != 0);
+ enableInvokeDynamic = (db.lookupIntConstant("EnableInvokeDynamic").intValue() != 0);
if (debugger != null) {
isLP64 = debugger.getMachineDescription().isLP64();
@@ -552,6 +555,10 @@
return useTLAB;
}
+ public boolean getEnableInvokeDynamic() {
+ return enableInvokeDynamic;
+ }
+
public TypeDataBase getTypeDataBase() {
return db;
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Fri Aug 31 16:17:40 2012 -0700
@@ -204,7 +204,13 @@
} else if (cpu.equals("ia64")) {
cpuHelper = new IA64Helper();
} else {
+ try {
+ cpuHelper = (CPUHelper)Class.forName("sun.jvm.hotspot.asm." +
+ cpu.toLowerCase() + "." + cpu.toUpperCase() +
+ "Helper").newInstance();
+ } catch (Exception e) {
throw new RuntimeException("cpu '" + cpu + "' is not yet supported!");
+ }
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AltPlatformInfo.java Fri Aug 31 16:17:40 2012 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.utilities;
+
+public interface AltPlatformInfo {
+ // Additional cpu types can be tested via this interface
+
+ public boolean knownCPU(String cpu);
+}
\ No newline at end of file
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java Fri Aug 31 16:17:40 2012 -0700
@@ -64,6 +64,13 @@
} else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64")) {
return cpu;
} else {
+ try {
+ Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
+ AltPlatformInfo api = (AltPlatformInfo)pic.newInstance();
+ if (api.knownCPU(cpu)) {
+ return cpu;
+ }
+ } catch (Exception e) {}
throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
}
}
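
PlatformInfo now probes reflectively for a class named sun.jvm.hotspot.utilities.PlatformInfoClosed and, if one is present and implements the new AltPlatformInfo interface, defers the CPU check to it. A sketch of what such a companion class could look like; the class body and the CPU names it accepts are hypothetical, only the class name and interface come from this changeset:

    package sun.jvm.hotspot.utilities;

    // Hypothetical companion class: PlatformInfo.getCPU() looks it up by the
    // exact name "sun.jvm.hotspot.utilities.PlatformInfoClosed" and only
    // consults it when the built-in CPU list does not match.
    public class PlatformInfoClosed implements AltPlatformInfo {
        public boolean knownCPU(String cpu) {
            // Illustrative only: report two additional architectures as known.
            return "arm".equals(cpu) || "ppc".equals(cpu);
        }
    }
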
--- a/hotspot/make/defs.make Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/defs.make Fri Aug 31 16:17:40 2012 -0700
@@ -22,6 +22,14 @@
#
#
+ifeq ($(HS_ALT_MAKE),)
+ ifneq ($(OPENJDK),true)
+ HS_ALT_MAKE=$(GAMMADIR)/make/closed
+ else
+ HS_ALT_MAKE=NO_SUCH_PATH
+ endif
+endif
+
# The common definitions for hotspot builds.
# Optionally include SPEC file generated by configure.
@@ -327,3 +335,4 @@
ifndef JAVASE_EMBEDDED
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
endif
+
--- a/hotspot/make/hotspot_version Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/hotspot_version Fri Aug 31 16:17:40 2012 -0700
@@ -35,7 +35,7 @@
HS_MAJOR_VER=24
HS_MINOR_VER=0
-HS_BUILD_NUMBER=21
+HS_BUILD_NUMBER=22
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/make/linux/makefiles/defs.make Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/linux/makefiles/defs.make Fri Aug 31 16:17:40 2012 -0700
@@ -295,6 +295,8 @@
ADD_SA_BINARIES/arm =
ADD_SA_BINARIES/zero =
+-include $(HS_ALT_MAKE)/linux/makefiles/defs.make
+
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
--- a/hotspot/make/linux/makefiles/sa.make Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/linux/makefiles/sa.make Fri Aug 31 16:17:40 2012 -0700
@@ -30,10 +30,16 @@
include $(GAMMADIR)/make/linux/makefiles/rules.make
+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make
+
AGENT_DIR = $(GAMMADIR)/agent
include $(GAMMADIR)/make/sa.files
+-include $(HS_ALT_MAKE)/linux/makefiles/sa.make
+
+
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@@ -52,17 +58,15 @@
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium, PowerPC, ARM or zero.
+# also, we don't build SA on Itanium or zero.
all:
if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
- -a "$(SRCARCH)" != "arm" \
- -a "$(SRCARCH)" != "ppc" \
-a "$(SRCARCH)" != "zero" ] ; then \
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
-$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
+$(GENERATED)/sa-jdi.jar:: $(AGENT_FILES)
$(QUIETLY) echo "Making $@"
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -111,3 +115,5 @@
rm -rf $(SA_CLASSDIR)
rm -rf $(GENERATED)/sa-jdi.jar
rm -rf $(AGENT_FILES_LIST)
+
+-include $(HS_ALT_MAKE)/linux/makefiles/sa-rules.make
--- a/hotspot/make/linux/makefiles/saproc.make Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/linux/makefiles/saproc.make Fri Aug 31 16:17:40 2012 -0700
@@ -21,6 +21,8 @@
# questions.
#
#
+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make
# Rules to build serviceability agent library, used by vm.make
@@ -48,6 +50,8 @@
$(SASRCDIR)/ps_core.c \
$(SASRCDIR)/LinuxDebuggerLocal.c
+-include $(HS_ALT_MAKE)/linux/makefiles/saproc.make
+
SAMAPFILE = $(SASRCDIR)/mapfile
DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
@@ -60,15 +64,19 @@
endif
# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium, PPC, ARM or zero.
+# also, we don't build SA on Itanium or zero.
ifneq ($(wildcard $(AGENT_DIR)),)
-ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
+ifneq ($(filter-out ia64 zero,$(SRCARCH)),)
BUILDLIBSAPROC = $(LIBSAPROC)
endif
endif
-
+ifneq ($(ALT_SASRCDIR),)
+ALT_SAINCDIR=-I$(ALT_SASRCDIR)
+else
+ALT_SAINCDIR=
+endif
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
@@ -84,6 +92,7 @@
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
+ $(ALT_SAINCDIR) \
$(SASRCFILES) \
$(SA_LFLAGS) \
$(SA_DEBUG_CFLAGS) \
--- a/hotspot/make/pic.make Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/make/pic.make Fri Aug 31 16:17:40 2012 -0700
@@ -32,7 +32,7 @@
ifndef LP64
PARTIAL_NONPIC=1
endif
- PIC_ARCH = ppc
+ PIC_ARCH = ppc arm
ifneq ("$(filter $(PIC_ARCH),$(BUILDARCH))","")
PARTIAL_NONPIC=0
endif
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,4 +75,43 @@
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
+
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+ \
+ product(intx, UseVIS, 99, \
+ "Highest supported VIS instructions set on Sparc") \
+ \
+ product(bool, UseCBCond, false, \
+ "Use compare and branch instruction on SPARC") \
+ \
+ product(bool, UseBlockZeroing, false, \
+ "Use special cpu instructions for block zeroing") \
+ \
+ product(intx, BlockZeroingLowLimit, 2048, \
+ "Minimum size in bytes when block zeroing will be used") \
+ \
+ product(bool, UseBlockCopy, false, \
+ "Use special cpu instructions for block copy") \
+ \
+ product(intx, BlockCopyLowLimit, 2048, \
+ "Minimum size in bytes when block copy will be used") \
+ \
+ develop(bool, UseV8InstrsOnly, false, \
+ "Use SPARC-V8 Compliant instruction subset") \
+ \
+ product(bool, UseNiagaraInstrs, false, \
+ "Use Niagara-efficient instruction subset") \
+ \
+ develop(bool, UseCASForSwap, false, \
+ "Do not use swap instructions, but only CAS (in a loop) on SPARC")\
+ \
+ product(uintx, ArraycopySrcPrefetchDistance, 0, \
+          "Distance to prefetch source array in arraycopy")              \
+ \
+ product(uintx, ArraycopyDstPrefetchDistance, 0, \
+          "Distance to prefetch destination array in arraycopy")         \
+ \
+ develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
+ "Number of times to spin wait on a v8 atomic operation lock") \
+
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,4 +78,53 @@
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
+
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+ \
+ develop(bool, IEEEPrecision, true, \
+ "Enables IEEE precision (for INTEL only)") \
+ \
+ product(intx, FenceInstruction, 0, \
+ "(Unsafe,Unstable) Experimental") \
+ \
+ product(intx, ReadPrefetchInstr, 0, \
+ "Prefetch instruction to prefetch ahead") \
+ \
+ product(bool, UseStoreImmI16, true, \
+ "Use store immediate 16-bits value instruction on x86") \
+ \
+ product(intx, UseAVX, 99, \
+ "Highest supported AVX instructions set on x86/x64") \
+ \
+ diagnostic(bool, UseIncDec, true, \
+ "Use INC, DEC instructions on x86") \
+ \
+ product(bool, UseNewLongLShift, false, \
+ "Use optimized bitwise shift left") \
+ \
+ product(bool, UseAddressNop, false, \
+ "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
+ \
+ product(bool, UseXmmLoadAndClearUpper, true, \
+ "Load low part of XMM register and clear upper part") \
+ \
+ product(bool, UseXmmRegToRegMoveAll, false, \
+ "Copy all XMM register bits when moving value between registers") \
+ \
+ product(bool, UseXmmI2D, false, \
+ "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
+ \
+ product(bool, UseXmmI2F, false, \
+ "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
+ \
+ product(bool, UseUnalignedLoadStores, false, \
+ "Use SSE2 MOVDQU instruction for Arraycopy") \
+ \
+ /* assembler */ \
+ product(bool, Use486InstrsOnly, false, \
+ "Use 80486 Compliant instruction subset") \
+ \
+ product(bool, UseCountLeadingZerosInstruction, false, \
+ "Use count leading zeros instruction") \
+
#endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -60,4 +60,7 @@
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
+
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
+
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -202,7 +202,7 @@
static void fast_thread_clock_init(void);
#endif
- static bool supports_monotonic_clock() {
+ static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}
--- a/hotspot/src/os/linux/vm/os_linux.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -178,7 +178,7 @@
// fast POSIX clocks support
static void fast_thread_clock_init(void);
- static bool supports_monotonic_clock() {
+ static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}
--- a/hotspot/src/os/posix/launcher/launcher.script Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/os/posix/launcher/launcher.script Fri Aug 31 16:17:40 2012 -0700
@@ -29,7 +29,7 @@
# inside Emacs".
#
# If the first parameter is "-dbx", HotSpot will be launched inside dbx.
-#
+#
# If the first parameter is "-valgrind", HotSpot will be launched
# inside Valgrind (http://valgrind.kde.org) using the Memcheck skin,
# and with memory leak detection enabled. This currently (2005jan19)
@@ -45,19 +45,19 @@
# This is the name of the gdb binary to use
if [ ! "$GDB" ]
-then
+then
GDB=gdb
fi
# This is the name of the gdb binary to use
if [ ! "$DBX" ]
-then
+then
DBX=dbx
fi
# This is the name of the Valgrind binary to use
if [ ! "$VALGRIND" ]
-then
+then
VALGRIND=valgrind
fi
@@ -98,7 +98,7 @@
JDK=
if [ "${ALT_JAVA_HOME}" = "" ]; then
. ${MYDIR}/jdkpath.sh
-else
+else
JDK=${ALT_JAVA_HOME%%/jre};
fi
@@ -114,22 +114,34 @@
# any.
JRE=$JDK/jre
JAVA_HOME=$JDK
+export JAVA_HOME
+
ARCH=@@LIBARCH@@
-
SBP=${MYDIR}:${JRE}/lib/${ARCH}
-# Set up a suitable LD_LIBRARY_PATH
-if [ -z "$LD_LIBRARY_PATH" ]
+# Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
+OS=`uname -s`
+if [ "${OS}" = "Darwin" ]
then
- LD_LIBRARY_PATH="$SBP"
+ if [ -z "$DYLD_LIBRARY_PATH" ]
+ then
+ DYLD_LIBRARY_PATH="$SBP"
+ else
+ DYLD_LIBRARY_PATH="$SBP:$DYLD_LIBRARY_PATH"
+ fi
+ export DYLD_LIBRARY_PATH
else
- LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH"
+ # not 'Darwin'
+ if [ -z "$LD_LIBRARY_PATH" ]
+ then
+ LD_LIBRARY_PATH="$SBP"
+ else
+ LD_LIBRARY_PATH="$SBP:$LD_LIBRARY_PATH"
+ fi
+ export LD_LIBRARY_PATH
fi
-export LD_LIBRARY_PATH
-export JAVA_HOME
-
JPARMS="$@ $JAVA_ARGS";
# Locate the gamma development launcher
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -1646,15 +1646,15 @@
void GraphBuilder::invoke(Bytecodes::Code code) {
- const bool is_invokedynamic = (code == Bytecodes::_invokedynamic);
-
bool will_link;
- ciMethod* target = stream()->get_method(will_link);
+ ciSignature* declared_signature = NULL;
+ ciMethod* target = stream()->get_method(will_link, &declared_signature);
ciKlass* holder = stream()->get_declared_method_holder();
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
+ assert(declared_signature != NULL, "cannot be null");
// FIXME bail out for now
- if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) {
+ if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
}
@@ -1840,7 +1840,7 @@
bool success = false;
if (target->is_method_handle_intrinsic()) {
// method handle invokes
- success = for_method_handle_inline(target);
+ success = try_method_handle_inline(target);
} else {
// static binding => check if callee is ok
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
@@ -1877,7 +1877,7 @@
// inlining not successful => standard invoke
bool is_loaded = target->is_loaded();
- ValueType* result_type = as_ValueType(target->return_type());
+ ValueType* result_type = as_ValueType(declared_signature->return_type());
ValueStack* state_before = copy_state_exhandling();
// The bytecode (code) might change in this method so we are checking this very late.
@@ -3823,7 +3823,7 @@
}
-bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
+bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueStack* state_before = state()->copy_for_parsing();
vmIntrinsics::ID iid = callee->intrinsic_id();
switch (iid) {
@@ -3858,7 +3858,7 @@
// If the target is another method handle invoke, try recursively to get
// a better target.
if (target->is_method_handle_intrinsic()) {
- if (for_method_handle_inline(target)) {
+ if (try_method_handle_inline(target)) {
return true;
}
} else {
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -346,7 +346,7 @@
const char* should_not_inline(ciMethod* callee) const;
// JSR 292 support
- bool for_method_handle_inline(ciMethod* callee);
+ bool try_method_handle_inline(ciMethod* callee);
// helpers
void inline_bailout(const char* msg);
--- a/hotspot/src/share/vm/c1/c1_globals.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,18 +156,12 @@
develop(bool, CanonicalizeNodes, true, \
"Canonicalize graph nodes") \
\
- develop(bool, CanonicalizeExperimental, false, \
- "Canonicalize graph nodes, experimental code") \
- \
develop(bool, PrintCanonicalization, false, \
"Print graph node canonicalization") \
\
develop(bool, UseTableRanges, true, \
"Faster versions of lookup table using ranges") \
\
- develop(bool, UseFastExceptionHandling, true, \
- "Faster handling of exceptions") \
- \
develop_pd(bool, RoundFPResults, \
"Indicates whether rounding is needed for floating point results")\
\
@@ -224,9 +218,6 @@
develop(bool, PinAllInstructions, false, \
"All instructions are pinned") \
\
- develop(bool, ValueStackPinStackAll, true, \
- "Pinning in ValueStack pin everything") \
- \
develop(bool, UseFastNewInstance, true, \
"Use fast inlined instance allocation") \
\
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -236,12 +236,16 @@
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
ciInstanceKlass* actual_recv = callee_holder;
- // some methods are obviously bindable without any type checks so
- // convert them directly to an invokespecial.
+ // Some methods are obviously bindable without any type checks so
+ // convert them directly to an invokespecial or invokestatic.
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
switch (code) {
- case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break;
- case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break;
+ case Bytecodes::_invokevirtual:
+ code = Bytecodes::_invokespecial;
+ break;
+ case Bytecodes::_invokehandle:
+ code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
+ break;
}
}
@@ -826,8 +830,8 @@
break;
case Bytecodes::_getstatic:
case Bytecodes::_getfield:
- { bool will_link;
- ciField* field = s.get_field(will_link);
+ { bool ignored_will_link;
+ ciField* field = s.get_field(ignored_will_link);
BasicType field_type = field->type()->basic_type();
if (s.cur_bc() != Bytecodes::_getstatic) {
set_method_escape(state.apop());
@@ -865,16 +869,21 @@
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
- { bool will_link;
- ciMethod* target = s.get_method(will_link);
- ciKlass* holder = s.get_declared_method_holder();
+ { bool ignored_will_link;
+ ciSignature* declared_signature = NULL;
+ ciMethod* target = s.get_method(ignored_will_link, &declared_signature);
+ ciKlass* holder = s.get_declared_method_holder();
+ assert(declared_signature != NULL, "cannot be null");
// Push appendix argument, if one.
if (s.has_appendix()) {
state.apush(unknown_obj);
}
// Pass in raw bytecode because we need to see invokehandle instructions.
invoke(state, s.cur_bc_raw(), target, holder);
- ciType* return_type = target->return_type();
+ // We are using the return type of the declared signature here because
+ // it might be a more concrete type than the one from the target (for
+ // e.g. invokedynamic and invokehandle).
+ ciType* return_type = declared_signature->return_type();
if (!return_type->is_primitive_type()) {
state.apush(unknown_obj);
} else if (return_type->is_one_word()) {
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -738,91 +738,81 @@
ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
- int holder_index = cpool->klass_ref_index_at(index);
- bool holder_is_accessible;
- ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
- ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
-
- // Get the method's name and signature.
- Symbol* name_sym = cpool->name_ref_at(index);
- Symbol* sig_sym = cpool->signature_ref_at(index);
-
- if (cpool->has_preresolution()
- || (holder == ciEnv::MethodHandle_klass() &&
- MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
- // Short-circuit lookups for JSR 292-related call sites.
- // That is, do not rely only on name-based lookups, because they may fail
- // if the names are not resolvable in the boot class loader (7056328).
- switch (bc) {
- case Bytecodes::_invokevirtual:
- case Bytecodes::_invokeinterface:
- case Bytecodes::_invokespecial:
- case Bytecodes::_invokestatic:
- {
- oop appendix_oop = NULL;
- methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
- if (m != NULL) {
- return get_object(m)->as_method();
- }
- }
- break;
- }
- }
+ if (bc == Bytecodes::_invokedynamic) {
+ ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index);
+ const bool is_resolved = !secondary_entry->is_f1_null();
+ // FIXME: code generation could allow for null (unlinked) call site
+ // The call site could be made patchable as follows:
+ // Load the appendix argument from the constant pool.
+ // Test the appendix argument and jump to a known deopt routine if it is null.
+ // Jump through a patchable call site, which is initially a deopt routine.
+ // Patch the call site to the nmethod entry point of the static compiled lambda form.
+ // As with other two-component call sites, both values must be independently verified.
- if (holder_is_accessible) { // Our declared holder is loaded.
- instanceKlass* lookup = declared_holder->get_instanceKlass();
- methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
- if (m != NULL &&
- (bc == Bytecodes::_invokestatic
- ? instanceKlass::cast(m->method_holder())->is_not_initialized()
- : !instanceKlass::cast(m->method_holder())->is_loaded())) {
- m = NULL;
+ if (is_resolved) {
+ // Get the invoker methodOop and the extra argument from the constant pool.
+ methodOop adapter = secondary_entry->f2_as_vfinal_method();
+ return get_object(adapter)->as_method();
}
- if (m != NULL) {
- // We found the method.
- return get_object(m)->as_method();
- }
- }
-
- // Either the declared holder was not loaded, or the method could
- // not be found. Create a dummy ciMethod to represent the failed
- // lookup.
- ciSymbol* name = get_symbol(name_sym);
- ciSymbol* signature = get_symbol(sig_sym);
- return get_unloaded_method(declared_holder, name, signature, accessor);
-}
-
-// ------------------------------------------------------------------
-// ciEnv::get_fake_invokedynamic_method_impl
-ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
- int index, Bytecodes::Code bc,
- ciInstanceKlass* accessor) {
- // Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
- assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
-
- ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index);
- bool is_resolved = !secondary_entry->is_f1_null();
- // FIXME: code generation could allow for null (unlinked) call site
- // The call site could be made patchable as follows:
- // Load the appendix argument from the constant pool.
- // Test the appendix argument and jump to a known deopt routine if it is null.
- // Jump through a patchable call site, which is initially a deopt routine.
- // Patch the call site to the nmethod entry point of the static compiled lambda form.
- // As with other two-component call sites, both values must be independently verified.
-
- // Call site might not be resolved yet.
- // Stop the code path here with an unlinked method.
- if (!is_resolved) {
+ // Fake a method that is equivalent to a declared method.
ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
ciSymbol* name = ciSymbol::invokeBasic_name();
ciSymbol* signature = get_symbol(cpool->signature_ref_at(index));
return get_unloaded_method(holder, name, signature, accessor);
- }
+ } else {
+ const int holder_index = cpool->klass_ref_index_at(index);
+ bool holder_is_accessible;
+ ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
+ ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
+
+ // Get the method's name and signature.
+ Symbol* name_sym = cpool->name_ref_at(index);
+ Symbol* sig_sym = cpool->signature_ref_at(index);
- // Get the invoker methodOop and the extra argument from the constant pool.
- methodOop adapter = secondary_entry->f2_as_vfinal_method();
- return get_object(adapter)->as_method();
+ if (cpool->has_preresolution()
+ || (holder == ciEnv::MethodHandle_klass() &&
+ MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) {
+ // Short-circuit lookups for JSR 292-related call sites.
+ // That is, do not rely only on name-based lookups, because they may fail
+ // if the names are not resolvable in the boot class loader (7056328).
+ switch (bc) {
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokeinterface:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokestatic:
+ {
+ methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index);
+ if (m != NULL) {
+ return get_object(m)->as_method();
+ }
+ }
+ break;
+ }
+ }
+
+ if (holder_is_accessible) { // Our declared holder is loaded.
+ instanceKlass* lookup = declared_holder->get_instanceKlass();
+ methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
+ if (m != NULL &&
+ (bc == Bytecodes::_invokestatic
+ ? instanceKlass::cast(m->method_holder())->is_not_initialized()
+ : !instanceKlass::cast(m->method_holder())->is_loaded())) {
+ m = NULL;
+ }
+ if (m != NULL) {
+ // We found the method.
+ return get_object(m)->as_method();
+ }
+ }
+
+ // Either the declared holder was not loaded, or the method could
+ // not be found. Create a dummy ciMethod to represent the failed
+ // lookup.
+ ciSymbol* name = get_symbol(name_sym);
+ ciSymbol* signature = get_symbol(sig_sym);
+ return get_unloaded_method(declared_holder, name, signature, accessor);
+ }
}
@@ -853,11 +843,7 @@
ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
int index, Bytecodes::Code bc,
ciInstanceKlass* accessor) {
- if (bc == Bytecodes::_invokedynamic) {
- GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor);)
- } else {
- GUARDED_VM_ENTRY(return get_method_by_index_impl( cpool, index, bc, accessor);)
- }
+ GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
}
--- a/hotspot/src/share/vm/ci/ciEnv.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -152,9 +152,6 @@
ciMethod* get_method_by_index_impl(constantPoolHandle cpool,
int method_index, Bytecodes::Code bc,
ciInstanceKlass* loading_klass);
- ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
- int index, Bytecodes::Code bc,
- ciInstanceKlass* accessor);
// Helper methods
bool check_klass_accessibility(ciKlass* accessing_klass,
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -1215,9 +1215,10 @@
holder()->print_name_on(st);
st->print(" signature=");
signature()->as_symbol()->print_symbol_on(st);
- st->print(" arg_size=%d", arg_size());
if (is_loaded()) {
- st->print(" loaded=true flags=");
+ st->print(" loaded=true");
+ st->print(" arg_size=%d", arg_size());
+ st->print(" flags=");
flags().print_member_flags(st);
} else {
st->print(" loaded=false");
--- a/hotspot/src/share/vm/ci/ciStreams.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -355,11 +355,23 @@
// ciBytecodeStream::get_method
//
// If this is a method invocation bytecode, get the invoked method.
-ciMethod* ciBytecodeStream::get_method(bool& will_link) {
+// Additionally return the declared signature to get more concrete
+// type information if required (Cf. invokedynamic and invokehandle).
+ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_signature_result) {
VM_ENTRY_MARK;
+ ciEnv* env = CURRENT_ENV;
constantPoolHandle cpool(_method->get_methodOop()->constants());
- ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
+ ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
+ // Get declared method signature and return it.
+ if (has_optional_appendix()) {
+ const int sig_index = get_method_signature_index();
+ Symbol* sig_sym = cpool->symbol_at(sig_index);
+ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
+ (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
+ } else {
+ (*declared_signature_result) = m->signature();
+ }
return m;
}
@@ -419,35 +431,18 @@
}
// ------------------------------------------------------------------
-// ciBytecodeStream::get_declared_method_signature
-//
-// Get the declared signature of the currently referenced method.
-//
-// This is always the same as the signature of the resolved method
-// itself, except for _invokehandle and _invokedynamic calls.
-//
-ciSignature* ciBytecodeStream::get_declared_method_signature() {
- int sig_index = get_method_signature_index();
- VM_ENTRY_MARK;
- ciEnv* env = CURRENT_ENV;
- constantPoolHandle cpool(_method->get_methodOop()->constants());
- Symbol* sig_sym = cpool->symbol_at(sig_index);
- ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass();
- return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
-}
-
-// ------------------------------------------------------------------
// ciBytecodeStream::get_method_signature_index
//
// Get the constant pool index of the signature of the method
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_method_signature_index() {
- VM_ENTRY_MARK;
- constantPoolOop cpool = _holder->get_instanceKlass()->constants();
- int method_index = get_method_index();
- int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
- return cpool->signature_ref_index_at(name_and_type_index);
+ GUARDED_VM_ENTRY(
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ const int method_index = get_method_index();
+ const int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
+ return cpool->signature_ref_index_at(name_and_type_index);
+ )
}
// ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciStreams.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -151,6 +151,8 @@
// Does this instruction contain an index which refers into the CP cache?
bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+ bool has_optional_appendix() { return Bytecodes::has_optional_appendix(cur_bc_raw()); }
+
int get_index_u1() const {
return bytecode().get_index_u1(cur_bc_raw());
}
@@ -257,13 +259,11 @@
int get_field_holder_index();
int get_field_signature_index();
- // If this is a method invocation bytecode, get the invoked method.
- ciMethod* get_method(bool& will_link);
+ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
bool has_appendix();
ciObject* get_appendix();
ciKlass* get_declared_method_holder();
int get_method_holder_index();
- ciSignature* get_declared_method_signature();
int get_method_signature_index();
ciCPCache* get_cpcache() const;
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -643,9 +643,11 @@
// ------------------------------------------------------------------
// ciTypeFlow::StateVector::do_invoke
void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
- bool has_receiver_foo) {
+ bool has_receiver) {
bool will_link;
- ciMethod* callee = str->get_method(will_link);
+ ciSignature* declared_signature = NULL;
+ ciMethod* callee = str->get_method(will_link, &declared_signature);
+ assert(declared_signature != NULL, "cannot be null");
if (!will_link) {
// We weren't able to find the method.
if (str->cur_bc() == Bytecodes::_invokedynamic) {
@@ -658,22 +660,12 @@
trap(str, unloaded_holder, str->get_method_holder_index());
}
} else {
- // TODO Use Bytecode_invoke after metadata changes.
- //Bytecode_invoke inv(str->method(), str->cur_bci());
- //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver();
- Bytecode inv(str);
- Bytecodes::Code code = inv.invoke_code();
- const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic;
-
- ciSignature* signature = callee->signature();
- ciSignatureStream sigstr(signature);
- // Push appendix argument, if one.
- if (str->has_appendix()) {
- ciObject* appendix = str->get_appendix();
- push_object(appendix->klass());
- }
- int arg_size = signature->size();
- int stack_base = stack_size() - arg_size;
+ // We are using the declared signature here because it might be
+ // different from the callee signature (Cf. invokedynamic and
+ // invokehandle).
+ ciSignatureStream sigstr(declared_signature);
+ const int arg_size = declared_signature->size();
+ const int stack_base = stack_size() - arg_size;
int i = 0;
for( ; !sigstr.at_return_type(); sigstr.next()) {
ciType* type = sigstr.type();
@@ -689,7 +681,6 @@
for (int j = 0; j < arg_size; j++) {
pop();
}
- assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch");
if (has_receiver) {
// Check this?
pop_object();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -1246,6 +1246,31 @@
heap_region_iterate(&cl);
}
+double G1CollectedHeap::verify(bool guard, const char* msg) {
+ double verify_time_ms = 0.0;
+
+ if (guard && total_collections() >= VerifyGCStartAt) {
+ double verify_start = os::elapsedTime();
+ HandleMark hm; // Discard invalid handles created during verification
+ gclog_or_tty->print(msg);
+ prepare_for_verify();
+ Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
+ verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
+ }
+
+ return verify_time_ms;
+}
+
+void G1CollectedHeap::verify_before_gc() {
+ double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
+ g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+}
+
+void G1CollectedHeap::verify_after_gc() {
+ double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
+ g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+}
+
bool G1CollectedHeap::do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size) {
@@ -1304,14 +1329,8 @@
size_t g1h_prev_used = used();
assert(used() == recalculate_used(), "Should be equal");
- if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
- gclog_or_tty->print(" VerifyBeforeGC:");
- prepare_for_verify();
- Universe::verify(/* silent */ false,
- /* option */ VerifyOption_G1UsePrevMarking);
-
- }
+ verify_before_gc();
+
pre_full_gc_dump();
COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -1378,14 +1397,7 @@
MemoryService::track_memory_usage();
- if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
- gclog_or_tty->print(" VerifyAfterGC:");
- prepare_for_verify();
- Universe::verify(/* silent */ false,
- /* option */ VerifyOption_G1UsePrevMarking);
-
- }
+ verify_after_gc();
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
@@ -1933,6 +1945,9 @@
clear_cset_start_regions();
+ // Initialize the G1EvacuationFailureALot counters and flags.
+ NOT_PRODUCT(reset_evacuation_should_fail();)
+
guarantee(_task_queues != NULL, "task_queues allocation failure.");
#ifdef SPARC
// Issue a stern warning, but allow use for experimentation and debugging.
@@ -2327,8 +2342,7 @@
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
n_completed_buffers++;
}
- g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i,
- (double) n_completed_buffers);
+ g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
@@ -3735,8 +3749,9 @@
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
- g1_policy()->phase_times()->note_gc_start(os::elapsedTime(), active_workers,
- g1_policy()->gcs_are_young(), g1_policy()->during_initial_mark_pause(), gc_cause());
+ double pause_start_sec = os::elapsedTime();
+ g1_policy()->phase_times()->note_gc_start(active_workers);
+ bool initial_mark_gc = g1_policy()->during_initial_mark_pause();
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
@@ -3765,13 +3780,7 @@
increment_total_collections(false /* full gc */);
increment_gc_time_stamp();
- if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
- gclog_or_tty->print(" VerifyBeforeGC:");
- prepare_for_verify();
- Universe::verify(/* silent */ false,
- /* option */ VerifyOption_G1UsePrevMarking);
- }
+ verify_before_gc();
COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -3984,10 +3993,6 @@
true /* verify_fingers */);
_cm->note_end_of_gc();
- // Collect thread local data to allow the ergonomics to use
- // the collected information
- g1_policy()->phase_times()->collapse_par_times();
-
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
@@ -4020,13 +4025,7 @@
// scanning cards (see CR 7039627).
increment_gc_time_stamp();
- if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
- gclog_or_tty->print(" VerifyAfterGC:");
- prepare_for_verify();
- Universe::verify(/* silent */ false,
- /* option */ VerifyOption_G1UsePrevMarking);
- }
+ verify_after_gc();
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
@@ -4050,10 +4049,35 @@
gc_epilogue(false);
- g1_policy()->phase_times()->note_gc_end(os::elapsedTime());
-
- // We have to do this after we decide whether to expand the heap or not.
+ if (G1Log::fine()) {
+ if (PrintGCTimeStamps) {
+ gclog_or_tty->stamp();
+ gclog_or_tty->print(": ");
+ }
+
+ GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
+ .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
+ .append(initial_mark_gc ? " (initial-mark)" : "");
+
+ double pause_time_sec = os::elapsedTime() - pause_start_sec;
+
+ if (G1Log::finer()) {
+ if (evacuation_failed()) {
+ gc_cause_str.append(" (to-space exhausted)");
+ }
+ gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_sec);
+ g1_policy()->phase_times()->note_gc_end();
+ g1_policy()->phase_times()->print(pause_time_sec);
+ g1_policy()->print_detailed_heap_transition();
+ } else {
+ if (evacuation_failed()) {
+ gc_cause_str.append("--");
+ }
+ gclog_or_tty->print("[%s", (const char*)gc_cause_str);
g1_policy()->print_heap_transition();
+ gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
+ }
+ }
}
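With the pause line now printed here rather than in G1GCPhaseTimes::print(), a fine-level pause (roughly the -XX:+PrintGC case) comes out as a single line of the shape

   [GC pause (young) <heap transition>, 0.0234567 secs]

where the heap transition is supplied by print_heap_transition() and a trailing "--" is appended to the cause string when the evacuation failed. At finer level the same pause instead gets the "(to-space exhausted)" tag, the per-phase breakdown from phase_times()->print(), and print_detailed_heap_transition(). The values above are illustrative only.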
// It is not yet to safe to tell the concurrent mark to
@@ -4543,7 +4567,15 @@
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
- oop obj = oop(obj_ptr);
+#ifndef PRODUCT
+ // Should this evacuation fail?
+ if (_g1->evacuation_should_fail()) {
+ if (obj_ptr != NULL) {
+ _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+ obj_ptr = NULL;
+ }
+ }
+#endif // !PRODUCT
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
@@ -4552,6 +4584,8 @@
return _g1->handle_evacuation_failure_par(cl, old);
}
+ oop obj = oop(obj_ptr);
+
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
@@ -4863,7 +4897,7 @@
evac.do_void();
double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0;
- _g1h->g1_policy()->phase_times()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
+ _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
@@ -4991,27 +5025,28 @@
buf_scan_non_heap_roots.done();
buf_scan_perm.done();
- double ext_roots_end = os::elapsedTime();
-
- g1_policy()->phase_times()->reset_obj_copy_time(worker_i);
double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
buf_scan_non_heap_roots.closure_app_seconds();
g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
double ext_root_time_ms =
- ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
+ ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
+ double satb_filtering_ms = 0.0;
if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
if (mark_in_progress()) {
+ double satb_filter_start = os::elapsedTime();
+
JavaThread::satb_mark_queue_set().filter_thread_buffers();
+
+ satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
}
}
- double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
@@ -5556,6 +5591,9 @@
_expand_heap_after_alloc_failure = true;
set_evacuation_failed(false);
+ // Should G1EvacuationFailureALot be in effect for this GC?
+ NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
+
g1_rem_set()->prepare_for_oops_into_collection_set_do();
concurrent_g1_refine()->set_use_cache(false);
concurrent_g1_refine()->clear_hot_cache_claimed_index();
@@ -5647,11 +5685,11 @@
if (evacuation_failed()) {
remove_self_forwarding_pointers();
- if (G1Log::finer()) {
- gclog_or_tty->print(" (to-space exhausted)");
- } else if (G1Log::fine()) {
- gclog_or_tty->print("--");
- }
+
+ // Reset the G1EvacuationFailureALot counters and flags
+ // Note: the values are reset only when an actual
+ // evacuation failure occurs.
+ NOT_PRODUCT(reset_evacuation_should_fail();)
}
// Enqueue any remaining references remaining on the STW
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -405,6 +405,10 @@
// heap after a compaction.
void print_hrs_post_compaction();
+ double verify(bool guard, const char* msg);
+ void verify_before_gc();
+ void verify_after_gc();
+
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
@@ -911,6 +915,39 @@
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
+#ifndef PRODUCT
+ // Support for forcing evacuation failures. Analogous to
+ // PromotionFailureALot for the other collectors.
+
+ // Records whether G1EvacuationFailureALot should be in effect
+ // for the current GC
+ bool _evacuation_failure_alot_for_current_gc;
+
+ // Used to record the GC number for interval checking when
+ // determining whether G1EvacuationFailureALot is in effect
+ // determining whether G1EvacuationFailureALot is in effect
+ // for the current GC.
+ size_t _evacuation_failure_alot_gc_number;
+
+ // Count of the number of evacuations between failures.
+ volatile size_t _evacuation_failure_alot_count;
+
+ // Set whether G1EvacuationFailureALot should be in effect
+ // for the current GC (based upon the type of GC and which
+ // command line flags are set).
+ inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+ bool during_initial_mark,
+ bool during_marking);
+
+ inline void set_evacuation_failure_alot_for_current_gc();
+
+ // Return true if it's time to cause an evacuation failure.
+ inline bool evacuation_should_fail();
+
+ // Reset the G1EvacuationFailureALot counters. Should be called at
+ // the end of an evacuation pause in which an evacuation failure occurred.
+ inline void reset_evacuation_should_fail();
+#endif // !PRODUCT
+
// ("Weak") Reference processing support.
//
// G1 has 2 instances of the referece processor class. One
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -138,7 +138,7 @@
return _task_queues->queue(i);
}
-inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
+inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}
@@ -146,4 +146,77 @@
return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
+#ifndef PRODUCT
+// Support for G1EvacuationFailureALot
+
+inline bool
+G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+ bool during_initial_mark,
+ bool during_marking) {
+ bool res = false;
+ if (during_marking) {
+ res |= G1EvacuationFailureALotDuringConcMark;
+ }
+ if (during_initial_mark) {
+ res |= G1EvacuationFailureALotDuringInitialMark;
+ }
+ if (gcs_are_young) {
+ res |= G1EvacuationFailureALotDuringYoungGC;
+ } else {
+ // GCs are mixed
+ res |= G1EvacuationFailureALotDuringMixedGC;
+ }
+ return res;
+}
+
+inline void
+G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
+ if (G1EvacuationFailureALot) {
+ // Note we can't assert that _evacuation_failure_alot_for_current_gc
+ // is clear here. It may have been set during a previous GC but that GC
+ // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
+ // trigger an evacuation failure and clear the flags and counts.
+
+ // Check if we have gone over the interval.
+ const size_t gc_num = total_collections();
+ const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;
+
+ _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
+
+ // Now check if G1EvacuationFailureALot is enabled for the current GC type.
+ const bool gcs_are_young = g1_policy()->gcs_are_young();
+ const bool during_im = g1_policy()->during_initial_mark_pause();
+ const bool during_marking = mark_in_progress();
+
+ _evacuation_failure_alot_for_current_gc &=
+ evacuation_failure_alot_for_gc_type(gcs_are_young,
+ during_im,
+ during_marking);
+ }
+}
+
+inline bool
+G1CollectedHeap::evacuation_should_fail() {
+ if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
+ return false;
+ }
+ // G1EvacuationFailureALot is in effect for current GC
+ // Access to _evacuation_failure_alot_count is not atomic;
+ // the value does not have to be exact.
+ if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
+ return false;
+ }
+ _evacuation_failure_alot_count = 0;
+ return true;
+}
+
+inline void G1CollectedHeap::reset_evacuation_should_fail() {
+ if (G1EvacuationFailureALot) {
+ _evacuation_failure_alot_gc_number = total_collections();
+ _evacuation_failure_alot_count = 0;
+ _evacuation_failure_alot_for_current_gc = false;
+ }
+}
+#endif // #ifndef PRODUCT
+
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
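A worked example of the interval/count logic above, using the develop defaults added to g1_globals.hpp further down in this change (G1EvacuationFailureALotInterval = 5, G1EvacuationFailureALotCount = 1000): once at least five collections have elapsed since the last reset and the pause type matches one of the enabled G1EvacuationFailureALotDuring* flags, set_evacuation_failure_alot_for_current_gc() arms the flag, and roughly every 1000th call to evacuation_should_fail() then returns true, steering the copy path into handle_evacuation_failure_par(). The counters are only cleared when a failure actually occurs (reset_evacuation_should_fail() is called from the evacuation-failure branch), so an armed pause that copies fewer than 1000 objects simply carries the count forward to the next pause.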
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -795,7 +795,7 @@
_trace_gen0_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;
- phase_times()->_cur_collection_start_sec = start_time_sec;
+ phase_times()->record_cur_collection_start_sec(start_time_sec);
_cur_collection_pause_used_at_start_bytes = start_used;
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
_pending_cards = _g1->pending_card_num();
@@ -947,7 +947,7 @@
_trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
// this is where we update the allocation rate of the application
double app_time_ms =
- (phase_times()->_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
+ (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
if (app_time_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
@@ -1035,7 +1035,7 @@
if (update_stats) {
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
- cost_per_card_ms = phase_times()->_update_rs_time / (double) _pending_cards;
+ cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
@@ -1043,7 +1043,7 @@
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
- cost_per_entry_ms = phase_times()->_scan_rs_time / (double) cards_scanned;
+ cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
if (_last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
@@ -1083,7 +1083,7 @@
size_t copied_bytes = surviving_bytes;
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
- cost_per_byte_ms = phase_times()->_obj_copy_time / (double) copied_bytes;
+ cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
if (_in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
@@ -1092,21 +1092,22 @@
}
double all_other_time_ms = pause_time_ms -
- (phase_times()->_update_rs_time + phase_times()->_scan_rs_time + phase_times()->_obj_copy_time + phase_times()->_termination_time);
+ (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
+ + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
double young_other_time_ms = 0.0;
if (young_cset_region_length() > 0) {
young_other_time_ms =
- phase_times()->_recorded_young_cset_choice_time_ms +
- phase_times()->_recorded_young_free_cset_time_ms;
+ phase_times()->young_cset_choice_time_ms() +
+ phase_times()->young_free_cset_time_ms();
_young_other_cost_per_region_ms_seq->add(young_other_time_ms /
(double) young_cset_region_length());
}
double non_young_other_time_ms = 0.0;
if (old_cset_region_length() > 0) {
non_young_other_time_ms =
- phase_times()->_recorded_non_young_cset_choice_time_ms +
- phase_times()->_recorded_non_young_free_cset_time_ms;
+ phase_times()->non_young_cset_choice_time_ms() +
+ phase_times()->non_young_free_cset_time_ms();
_non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
(double) old_cset_region_length());
@@ -1133,7 +1134,8 @@
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
- adjust_concurrent_refinement(phase_times()->_update_rs_time, phase_times()->_update_rs_processed_buffers, update_rs_time_goal_ms);
+ adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
+ phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
_collectionSetChooser->verify();
}
@@ -1144,7 +1146,11 @@
proper_unit_for_byte_size((bytes))
void G1CollectorPolicy::print_heap_transition() {
- if (G1Log::finer()) {
+ _g1->print_size_transition(gclog_or_tty,
+ _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
+}
+
+void G1CollectorPolicy::print_detailed_heap_transition() {
YoungList* young_list = _g1->young_list();
size_t eden_bytes = young_list->eden_used_bytes();
size_t survivor_bytes = young_list->survivor_used_bytes();
@@ -1171,11 +1177,6 @@
EXT_SIZE_PARAMS(capacity));
_prev_eden_capacity = eden_capacity;
- } else if (G1Log::fine()) {
- _g1->print_size_transition(gclog_or_tty,
- _cur_collection_pause_used_at_start_bytes,
- _g1->used(), _g1->capacity());
- }
}
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@@ -1900,8 +1901,7 @@
set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
double young_end_time_sec = os::elapsedTime();
- phase_times()->_recorded_young_cset_choice_time_ms =
- (young_end_time_sec - young_start_time_sec) * 1000.0;
+ phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
// Set the start of the non-young choice time.
double non_young_start_time_sec = young_end_time_sec;
@@ -2015,8 +2015,7 @@
predicted_pause_time_ms, target_pause_time_ms);
double non_young_end_time_sec = os::elapsedTime();
- phase_times()->_recorded_non_young_cset_choice_time_ms =
- (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
+ phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
@@ -2035,25 +2034,25 @@
if(TraceGen0Time) {
_total.add(pause_time_ms);
_other.add(pause_time_ms - phase_times->accounted_time_ms());
- _root_region_scan_wait.add(phase_times->_root_region_scan_wait_time_ms);
- _parallel.add(phase_times->_cur_collection_par_time_ms);
- _ext_root_scan.add(phase_times->_ext_root_scan_time);
- _satb_filtering.add(phase_times->_satb_filtering_time);
- _update_rs.add(phase_times->_update_rs_time);
- _scan_rs.add(phase_times->_scan_rs_time);
- _obj_copy.add(phase_times->_obj_copy_time);
- _termination.add(phase_times->_termination_time);
+ _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
+ _parallel.add(phase_times->cur_collection_par_time_ms());
+ _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
+ _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
+ _update_rs.add(phase_times->average_last_update_rs_time());
+ _scan_rs.add(phase_times->average_last_scan_rs_time());
+ _obj_copy.add(phase_times->average_last_obj_copy_time());
+ _termination.add(phase_times->average_last_termination_time());
- double parallel_known_time = phase_times->_ext_root_scan_time +
- phase_times->_satb_filtering_time +
- phase_times->_update_rs_time +
- phase_times->_scan_rs_time +
- phase_times->_obj_copy_time +
- + phase_times->_termination_time;
+ double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
+ phase_times->average_last_satb_filtering_times_ms() +
+ phase_times->average_last_update_rs_time() +
+ phase_times->average_last_scan_rs_time() +
+ phase_times->average_last_obj_copy_time() +
+ + phase_times->average_last_termination_time();
- double parallel_other_time = phase_times->_cur_collection_par_time_ms - parallel_known_time;
+ double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
_parallel_other.add(parallel_other_time);
- _clear_ct.add(phase_times->_cur_clear_ct_time_ms);
+ _clear_ct.add(phase_times->cur_clear_ct_time_ms());
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -683,6 +683,7 @@
void record_collection_pause_end(double pause_time);
void print_heap_transition();
+ void print_detailed_heap_transition();
// Record the fact that a full collection occurred.
void record_full_collection_start();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -79,119 +79,145 @@
}
};
+template <class T>
+void WorkerDataArray<T>::print(int level, const char* title) {
+ if (_length == 1) {
+ // No need for min, max, average and sum for only one worker
+ LineBuffer buf(level);
+ buf.append("[%s: ", title);
+ buf.append(_print_format, _data[0]);
+ buf.append_and_print_cr("]");
+ return;
+ }
+
+ T min = _data[0];
+ T max = _data[0];
+ T sum = 0;
+
+ LineBuffer buf(level);
+ buf.append("[%s:", title);
+ for (uint i = 0; i < _length; ++i) {
+ T val = _data[i];
+ min = MIN2(val, min);
+ max = MAX2(val, max);
+ sum += val;
+ if (G1Log::finest()) {
+ buf.append(" ");
+ buf.append(_print_format, val);
+ }
+ }
+
+ if (G1Log::finest()) {
+ buf.append_and_print_cr("");
+ }
+
+ double avg = (double)sum / (double)_length;
+ buf.append(" Min: ");
+ buf.append(_print_format, min);
+ buf.append(", Avg: ");
+ buf.append("%.1lf", avg); // Always print average as a double
+ buf.append(", Max: ");
+ buf.append(_print_format, max);
+ buf.append(", Diff: ");
+ buf.append(_print_format, max - min);
+ if (_print_sum) {
+ // for things like the start and end times the sum is not
+ // that relevant
+ buf.append(", Sum: ");
+ buf.append(_print_format, sum);
+ }
+ buf.append_and_print_cr("]");
+}
+
+#ifdef ASSERT
+
+template <class T>
+void WorkerDataArray<T>::reset() {
+ for (uint i = 0; i < _length; i++) {
+ _data[i] = (T)-1;
+ }
+}
+
+template <class T>
+void WorkerDataArray<T>::verify() {
+ for (uint i = 0; i < _length; i++) {
+ assert(_data[i] >= (T)0, err_msg("Invalid data for worker %d", i));
+ }
+}
+
+#endif
+
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
- _num_cc_clears(0L)
+ _num_cc_clears(0L),
+ _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
+ _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+ _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
+ _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
+ _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
+ _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
+ _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
+ _last_termination_times_ms(_max_gc_threads, "%.1lf"),
+ _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
+ _last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
+ _last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
+ _last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf")
{
assert(max_gc_threads > 0, "Must have some GC threads");
- _par_last_gc_worker_start_times_ms = new double[_max_gc_threads];
- _par_last_ext_root_scan_times_ms = new double[_max_gc_threads];
- _par_last_satb_filtering_times_ms = new double[_max_gc_threads];
- _par_last_update_rs_times_ms = new double[_max_gc_threads];
- _par_last_update_rs_processed_buffers = new double[_max_gc_threads];
- _par_last_scan_rs_times_ms = new double[_max_gc_threads];
- _par_last_obj_copy_times_ms = new double[_max_gc_threads];
- _par_last_termination_times_ms = new double[_max_gc_threads];
- _par_last_termination_attempts = new double[_max_gc_threads];
- _par_last_gc_worker_end_times_ms = new double[_max_gc_threads];
- _par_last_gc_worker_times_ms = new double[_max_gc_threads];
- _par_last_gc_worker_other_times_ms = new double[_max_gc_threads];
}
-void G1GCPhaseTimes::note_gc_start(double pause_start_time_sec, uint active_gc_threads,
- bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause) {
+void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
_active_gc_threads = active_gc_threads;
- _pause_start_time_sec = pause_start_time_sec;
- _is_young_gc = is_young_gc;
- _is_initial_mark_gc = is_initial_mark_gc;
- _gc_cause = gc_cause;
-#ifdef ASSERT
- // initialise the timing data to something well known so that we can spot
- // if something is not set properly
-
- for (uint i = 0; i < _max_gc_threads; ++i) {
- _par_last_gc_worker_start_times_ms[i] = -1234.0;
- _par_last_ext_root_scan_times_ms[i] = -1234.0;
- _par_last_satb_filtering_times_ms[i] = -1234.0;
- _par_last_update_rs_times_ms[i] = -1234.0;
- _par_last_update_rs_processed_buffers[i] = -1234.0;
- _par_last_scan_rs_times_ms[i] = -1234.0;
- _par_last_obj_copy_times_ms[i] = -1234.0;
- _par_last_termination_times_ms[i] = -1234.0;
- _par_last_termination_attempts[i] = -1234.0;
- _par_last_gc_worker_end_times_ms[i] = -1234.0;
- _par_last_gc_worker_times_ms[i] = -1234.0;
- _par_last_gc_worker_other_times_ms[i] = -1234.0;
- }
-#endif
+ _last_gc_worker_start_times_ms.reset();
+ _last_ext_root_scan_times_ms.reset();
+ _last_satb_filtering_times_ms.reset();
+ _last_update_rs_times_ms.reset();
+ _last_update_rs_processed_buffers.reset();
+ _last_scan_rs_times_ms.reset();
+ _last_obj_copy_times_ms.reset();
+ _last_termination_times_ms.reset();
+ _last_termination_attempts.reset();
+ _last_gc_worker_end_times_ms.reset();
+ _last_gc_worker_times_ms.reset();
+ _last_gc_worker_other_times_ms.reset();
}
-void G1GCPhaseTimes::note_gc_end(double pause_end_time_sec) {
- if (G1Log::fine()) {
- double pause_time_ms = (pause_end_time_sec - _pause_start_time_sec) * MILLIUNITS;
+void G1GCPhaseTimes::note_gc_end() {
+ _last_gc_worker_start_times_ms.verify();
+ _last_ext_root_scan_times_ms.verify();
+ _last_satb_filtering_times_ms.verify();
+ _last_update_rs_times_ms.verify();
+ _last_update_rs_processed_buffers.verify();
+ _last_scan_rs_times_ms.verify();
+ _last_obj_copy_times_ms.verify();
+ _last_termination_times_ms.verify();
+ _last_termination_attempts.verify();
+ _last_gc_worker_end_times_ms.verify();
for (uint i = 0; i < _active_gc_threads; i++) {
- _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
- _par_last_gc_worker_start_times_ms[i];
+ double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
+ _last_gc_worker_times_ms.set(i, worker_time);
- double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
- _par_last_satb_filtering_times_ms[i] +
- _par_last_update_rs_times_ms[i] +
- _par_last_scan_rs_times_ms[i] +
- _par_last_obj_copy_times_ms[i] +
- _par_last_termination_times_ms[i];
+ double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
+ _last_satb_filtering_times_ms.get(i) +
+ _last_update_rs_times_ms.get(i) +
+ _last_scan_rs_times_ms.get(i) +
+ _last_obj_copy_times_ms.get(i) +
+ _last_termination_times_ms.get(i);
- _par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
- worker_known_time;
+ double worker_other_time = worker_time - worker_known_time;
+ _last_gc_worker_other_times_ms.set(i, worker_other_time);
}
- print(pause_time_ms);
- }
-
-}
-
-void G1GCPhaseTimes::print_par_stats(int level,
- const char* str,
- double* data,
- bool showDecimals) {
- double min = data[0], max = data[0];
- double total = 0.0;
- LineBuffer buf(level);
- buf.append("[%s (ms):", str);
- for (uint i = 0; i < _active_gc_threads; ++i) {
- double val = data[i];
- if (val < min)
- min = val;
- if (val > max)
- max = val;
- total += val;
- if (G1Log::finest()) {
- if (showDecimals) {
- buf.append(" %.1lf", val);
- } else {
- buf.append(" %d", (int)val);
- }
- }
- }
-
- if (G1Log::finest()) {
- buf.append_and_print_cr("");
- }
- double avg = total / (double) _active_gc_threads;
- if (showDecimals) {
- buf.append_and_print_cr(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf, Sum: %.1lf]",
- min, avg, max, max - min, total);
- } else {
- buf.append_and_print_cr(" Min: %d, Avg: %d, Max: %d, Diff: %d, Sum: %d]",
- (int)min, (int)avg, (int)max, (int)max - (int)min, (int)total);
- }
+ _last_gc_worker_times_ms.verify();
+ _last_gc_worker_other_times_ms.verify();
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
@@ -202,73 +228,6 @@
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
}
-void G1GCPhaseTimes::print_stats(int level, const char* str, int value) {
- LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
-}
-
-double G1GCPhaseTimes::avg_value(double* data) {
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- double ret = 0.0;
- for (uint i = 0; i < _active_gc_threads; ++i) {
- ret += data[i];
- }
- return ret / (double) _active_gc_threads;
- } else {
- return data[0];
- }
-}
-
-double G1GCPhaseTimes::max_value(double* data) {
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- double ret = data[0];
- for (uint i = 1; i < _active_gc_threads; ++i) {
- if (data[i] > ret) {
- ret = data[i];
- }
- }
- return ret;
- } else {
- return data[0];
- }
-}
-
-double G1GCPhaseTimes::sum_of_values(double* data) {
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- double sum = 0.0;
- for (uint i = 0; i < _active_gc_threads; i++) {
- sum += data[i];
- }
- return sum;
- } else {
- return data[0];
- }
-}
-
-double G1GCPhaseTimes::max_sum(double* data1, double* data2) {
- double ret = data1[0] + data2[0];
-
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- for (uint i = 1; i < _active_gc_threads; ++i) {
- double data = data1[i] + data2[i];
- if (data > ret) {
- ret = data;
- }
- }
- }
- return ret;
-}
-
-void G1GCPhaseTimes::collapse_par_times() {
- _ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
- _satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
- _update_rs_time = avg_value(_par_last_update_rs_times_ms);
- _update_rs_processed_buffers =
- sum_of_values(_par_last_update_rs_processed_buffers);
- _scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
- _obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
- _termination_time = avg_value(_par_last_termination_times_ms);
-}
-
double G1GCPhaseTimes::accounted_time_ms() {
// Subtract the root region scanning wait time. It's initialized to
// zero at the start of the pause.
@@ -286,58 +245,37 @@
return misc_time_ms;
}
-void G1GCPhaseTimes::print(double pause_time_ms) {
-
- if (PrintGCTimeStamps) {
- gclog_or_tty->stamp();
- gclog_or_tty->print(": ");
- }
-
- GCCauseString gc_cause_str = GCCauseString("GC pause", _gc_cause)
- .append(_is_young_gc ? " (young)" : " (mixed)")
- .append(_is_initial_mark_gc ? " (initial-mark)" : "");
- gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_ms / 1000.0);
-
- if (!G1Log::finer()) {
- return;
- }
-
+void G1GCPhaseTimes::print(double pause_time_sec) {
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
- print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
- print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
- if (_satb_filtering_time > 0.0) {
- print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
+ _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
+ _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
+ if (_last_satb_filtering_times_ms.sum() > 0.0) {
+ _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
- print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
- if (G1Log::finest()) {
- print_par_stats(3, "Processed Buffers", _par_last_update_rs_processed_buffers,
- false /* showDecimals */);
- }
- print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
- print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
- print_par_stats(2, "Termination", _par_last_termination_times_ms);
+ _last_update_rs_times_ms.print(2, "Update RS (ms)");
+ _last_update_rs_processed_buffers.print(3, "Processed Buffers");
+ _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
+ _last_obj_copy_times_ms.print(2, "Object Copy (ms)");
+ _last_termination_times_ms.print(2, "Termination (ms)");
if (G1Log::finest()) {
- print_par_stats(3, "Termination Attempts", _par_last_termination_attempts,
- false /* showDecimals */);
+ _last_termination_attempts.print(3, "Termination Attempts");
}
- print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
- print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
- print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
+ _last_gc_worker_other_times_ms.print(2, "GC Worker Other (ms)");
+ _last_gc_worker_times_ms.print(2, "GC Worker Total (ms)");
+ _last_gc_worker_end_times_ms.print(2, "GC Worker End (ms)");
} else {
- print_stats(1, "Ext Root Scanning", _ext_root_scan_time);
- if (_satb_filtering_time > 0.0) {
- print_stats(1, "SATB Filtering", _satb_filtering_time);
+ _last_ext_root_scan_times_ms.print(1, "Ext Root Scanning (ms)");
+ if (_last_satb_filtering_times_ms.sum() > 0.0) {
+ _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
- print_stats(1, "Update RS", _update_rs_time);
- if (G1Log::finest()) {
- print_stats(2, "Processed Buffers", (int)_update_rs_processed_buffers);
- }
- print_stats(1, "Scan RS", _scan_rs_time);
- print_stats(1, "Object Copying", _obj_copy_time);
+ _last_update_rs_times_ms.print(1, "Update RS (ms)");
+ _last_update_rs_processed_buffers.print(2, "Processed Buffers");
+ _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
+ _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
@@ -350,8 +288,11 @@
print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
}
}
- double misc_time_ms = pause_time_ms - accounted_time_ms();
+ double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
print_stats(1, "Other", misc_time_ms);
+ if (_cur_verify_before_time_ms > 0.0) {
+ print_stats(2, "Verify Before", _cur_verify_before_time_ms);
+ }
print_stats(2, "Choose CSet",
(_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms));
@@ -360,6 +301,9 @@
print_stats(2, "Free CSet",
(_recorded_young_free_cset_time_ms +
_recorded_non_young_free_cset_time_ms));
+ if (_cur_verify_after_time_ms > 0.0) {
+ print_stats(2, "Verify After", _cur_verify_after_time_ms);
+ }
}
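When the verify timings have been recorded they appear as extra indented entries under "Other" in the finer-level breakdown, in the order of the print_stats() calls above; schematically (values and indentation illustrative, format as produced by print_stats()):

      [Other: 3.2 ms]
         [Verify Before: 1.1 ms]
         [Choose CSet: 0.0 ms]
         ...
         [Free CSet: 0.3 ms]
         [Verify After: 1.0 ms]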
void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -28,52 +28,109 @@
#include "memory/allocation.hpp"
#include "gc_interface/gcCause.hpp"
+template <class T>
+class WorkerDataArray : public CHeapObj<mtGC> {
+ T* _data;
+ uint _length;
+ const char* _print_format;
+ bool _print_sum;
+
+ // We are caching the sum and average to only have to calculate them once.
+ // This is not done in an MT-safe way. It is intended to allow single
+ // threaded code to call sum() and average() multiple times in any order
+ // without having to worry about the cost.
+ bool _has_new_data;
+ T _sum;
+ double _average;
+
+ public:
+ WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
+ _length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
+ assert(length > 0, "Must have some workers to store data for");
+ _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+ }
+
+ ~WorkerDataArray() {
+ FREE_C_HEAP_ARRAY(T, _data, mtGC);
+ }
+
+ void set(uint worker_i, T value) {
+ assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
+ assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
+ _data[worker_i] = value;
+ _has_new_data = true;
+ }
+
+ T get(uint worker_i) {
+ assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
+ assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
+ return _data[worker_i];
+ }
+
+ void add(uint worker_i, T value) {
+ assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
+ assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
+ _data[worker_i] += value;
+ _has_new_data = true;
+ }
+
+ double average() {
+ if (_has_new_data) {
+ calculate_totals();
+ }
+ return _average;
+ }
+
+ T sum() {
+ if (_has_new_data) {
+ calculate_totals();
+ }
+ return _sum;
+ }
+
+ void print(int level, const char* title);
+
+ void reset() PRODUCT_RETURN;
+ void verify() PRODUCT_RETURN;
+
+ private:
+
+ void calculate_totals() {
+ _sum = (T)0;
+ for (uint i = 0; i < _length; ++i) {
+ _sum += _data[i];
+ }
+ _average = (double)_sum / (double)_length;
+ _has_new_data = false;
+ }
+};
+
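The template above is largely self-describing; a minimal, hypothetical usage sketch follows (worker count and values invented, and note that the print/reset/verify template bodies live in g1GCPhaseTimes.cpp, so real callers sit next to G1GCPhaseTimes):

  WorkerDataArray<double> scan_rs_times(4 /* workers */, "%.1lf");
  scan_rs_times.reset();                   // debug builds seed each slot with -1 so set() can catch overwrites
  for (uint i = 0; i < 4; i++) {
    scan_rs_times.set(i, 1.5 * i);         // one sample per worker; add() accumulates into an existing sample
  }
  double avg = scan_rs_times.average();    // sum()/average() are computed lazily and cached until new data arrives
  scan_rs_times.print(2, "Scan RS (ms)");  // Min/Avg/Max/Diff (plus Sum when enabled, per-worker values at finest)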
class G1GCPhaseTimes : public CHeapObj<mtGC> {
- friend class G1CollectorPolicy;
- friend class TraceGen0TimeData;
private:
uint _active_gc_threads;
uint _max_gc_threads;
- GCCause::Cause _gc_cause;
- bool _is_young_gc;
- bool _is_initial_mark_gc;
-
- double _pause_start_time_sec;
-
- double* _par_last_gc_worker_start_times_ms;
- double* _par_last_ext_root_scan_times_ms;
- double* _par_last_satb_filtering_times_ms;
- double* _par_last_update_rs_times_ms;
- double* _par_last_update_rs_processed_buffers;
- double* _par_last_scan_rs_times_ms;
- double* _par_last_obj_copy_times_ms;
- double* _par_last_termination_times_ms;
- double* _par_last_termination_attempts;
- double* _par_last_gc_worker_end_times_ms;
- double* _par_last_gc_worker_times_ms;
- double* _par_last_gc_worker_other_times_ms;
+ WorkerDataArray<double> _last_gc_worker_start_times_ms;
+ WorkerDataArray<double> _last_ext_root_scan_times_ms;
+ WorkerDataArray<double> _last_satb_filtering_times_ms;
+ WorkerDataArray<double> _last_update_rs_times_ms;
+ WorkerDataArray<int> _last_update_rs_processed_buffers;
+ WorkerDataArray<double> _last_scan_rs_times_ms;
+ WorkerDataArray<double> _last_obj_copy_times_ms;
+ WorkerDataArray<double> _last_termination_times_ms;
+ WorkerDataArray<size_t> _last_termination_attempts;
+ WorkerDataArray<double> _last_gc_worker_end_times_ms;
+ WorkerDataArray<double> _last_gc_worker_times_ms;
+ WorkerDataArray<double> _last_gc_worker_other_times_ms;
double _cur_collection_par_time_ms;
-
double _cur_collection_code_root_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
double _cur_ref_enq_time_ms;
- // Helper methods for detailed logging
- void print_par_stats(int level, const char* str, double* data, bool showDecimals = true);
- void print_stats(int level, const char* str, double value);
- void print_stats(int level, const char* str, double value, int workers);
- void print_stats(int level, const char* str, int value);
- double avg_value(double* data);
- double max_value(double* data);
- double sum_of_values(double* data);
- double max_sum(double* data1, double* data2);
- double accounted_time_ms();
-
// Card Table Count Cache stats
double _min_clear_cc_time_ms; // min
double _max_clear_cc_time_ms; // max
@@ -81,19 +138,6 @@
double _cum_clear_cc_time_ms; // cummulative clearing time
jlong _num_cc_clears; // number of times the card count cache has been cleared
- // The following insance variables are directly accessed by G1CollectorPolicy
- // and TraceGen0TimeData. This is why those classes are declared friends.
- // An alternative is to add getters and setters for all of these fields.
- // It might also be possible to restructure the code to reduce these
- // dependencies.
- double _ext_root_scan_time;
- double _satb_filtering_time;
- double _update_rs_time;
- double _update_rs_processed_buffers;
- double _scan_rs_time;
- double _obj_copy_time;
- double _termination_time;
-
double _cur_collection_start_sec;
double _root_region_scan_wait_time_ms;
@@ -103,79 +147,58 @@
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
- void print(double pause_time_ms);
+ double _cur_verify_before_time_ms;
+ double _cur_verify_after_time_ms;
+
+ // Helper methods for detailed logging
+ void print_stats(int level, const char* str, double value);
+ void print_stats(int level, const char* str, double value, int workers);
public:
G1GCPhaseTimes(uint max_gc_threads);
- void note_gc_start(double pause_start_time_sec, uint active_gc_threads,
- bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause);
- void note_gc_end(double pause_end_time_sec);
- void collapse_par_times();
+ void note_gc_start(uint active_gc_threads);
+ void note_gc_end();
+ void print(double pause_time_sec);
void record_gc_worker_start_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_gc_worker_start_times_ms[worker_i] = ms;
+ _last_gc_worker_start_times_ms.set(worker_i, ms);
}
void record_ext_root_scan_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_ext_root_scan_times_ms[worker_i] = ms;
+ _last_ext_root_scan_times_ms.set(worker_i, ms);
}
void record_satb_filtering_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_satb_filtering_times_ms[worker_i] = ms;
+ _last_satb_filtering_times_ms.set(worker_i, ms);
}
void record_update_rs_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_update_rs_times_ms[worker_i] = ms;
+ _last_update_rs_times_ms.set(worker_i, ms);
}
- void record_update_rs_processed_buffers (uint worker_i,
- double processed_buffers) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_update_rs_processed_buffers[worker_i] = processed_buffers;
+ void record_update_rs_processed_buffers(uint worker_i, int processed_buffers) {
+ _last_update_rs_processed_buffers.set(worker_i, processed_buffers);
}
void record_scan_rs_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_scan_rs_times_ms[worker_i] = ms;
- }
-
- void reset_obj_copy_time(uint worker_i) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_obj_copy_times_ms[worker_i] = 0.0;
- }
-
- void reset_obj_copy_time() {
- reset_obj_copy_time(0);
+ _last_scan_rs_times_ms.set(worker_i, ms);
}
void record_obj_copy_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_obj_copy_times_ms[worker_i] += ms;
+ _last_obj_copy_times_ms.set(worker_i, ms);
+ }
+
+ void add_obj_copy_time(uint worker_i, double ms) {
+ _last_obj_copy_times_ms.add(worker_i, ms);
}
void record_termination(uint worker_i, double ms, size_t attempts) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_termination_times_ms[worker_i] = ms;
- _par_last_termination_attempts[worker_i] = (double) attempts;
+ _last_termination_times_ms.set(worker_i, ms);
+ _last_termination_attempts.set(worker_i, attempts);
}
void record_gc_worker_end_time(uint worker_i, double ms) {
- assert(worker_i >= 0, "worker index must be > 0");
- assert(worker_i < _active_gc_threads, "worker index out of bounds");
- _par_last_gc_worker_end_times_ms[worker_i] = ms;
+ _last_gc_worker_end_times_ms.set(worker_i, ms);
}
void record_clear_ct_time(double ms) {
@@ -211,6 +234,88 @@
void record_non_young_free_cset_time_ms(double time_ms) {
_recorded_non_young_free_cset_time_ms = time_ms;
}
+
+ void record_young_cset_choice_time_ms(double time_ms) {
+ _recorded_young_cset_choice_time_ms = time_ms;
+ }
+
+ void record_non_young_cset_choice_time_ms(double time_ms) {
+ _recorded_non_young_cset_choice_time_ms = time_ms;
+ }
+
+ void record_cur_collection_start_sec(double time_ms) {
+ _cur_collection_start_sec = time_ms;
+ }
+
+ void record_verify_before_time_ms(double time_ms) {
+ _cur_verify_before_time_ms = time_ms;
+ }
+
+ void record_verify_after_time_ms(double time_ms) {
+ _cur_verify_after_time_ms = time_ms;
+ }
+
+ double accounted_time_ms();
+
+ double cur_collection_start_sec() {
+ return _cur_collection_start_sec;
+ }
+
+ double cur_collection_par_time_ms() {
+ return _cur_collection_par_time_ms;
+ }
+
+ double cur_clear_ct_time_ms() {
+ return _cur_clear_ct_time_ms;
+ }
+
+ double root_region_scan_wait_time_ms() {
+ return _root_region_scan_wait_time_ms;
+ }
+
+ double young_cset_choice_time_ms() {
+ return _recorded_young_cset_choice_time_ms;
+ }
+
+ double young_free_cset_time_ms() {
+ return _recorded_young_free_cset_time_ms;
+ }
+
+ double non_young_cset_choice_time_ms() {
+ return _recorded_non_young_cset_choice_time_ms;
+ }
+
+ double non_young_free_cset_time_ms() {
+ return _recorded_non_young_free_cset_time_ms;
+ }
+
+ double average_last_update_rs_time() {
+ return _last_update_rs_times_ms.average();
+ }
+
+ int sum_last_update_rs_processed_buffers() {
+ return _last_update_rs_processed_buffers.sum();
+ }
+
+ double average_last_scan_rs_time() {
+ return _last_scan_rs_times_ms.average();
+ }
+
+ double average_last_obj_copy_time() {
+ return _last_obj_copy_times_ms.average();
+ }
+
+ double average_last_termination_time() {
+ return _last_termination_times_ms.average();
+ }
+
+ double average_last_ext_root_scan_time() {
+ return _last_ext_root_scan_times_ms.average();
+ }
+
+ double average_last_satb_filtering_times_ms() {
+ return _last_satb_filtering_times_ms.average();
+ }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -324,7 +324,7 @@
if (G1UseParallelRSetUpdating || (worker_i == 0)) {
updateRS(&into_cset_dcq, worker_i);
} else {
- _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0.0);
+ _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
_g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
}
if (G1UseParallelRSetScanning || (worker_i == 0)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -311,7 +311,35 @@
"as a percentage of the heap size.") \
\
experimental(ccstr, G1LogLevel, NULL, \
- "Log level for G1 logging: fine, finer, finest")
+ "Log level for G1 logging: fine, finer, finest") \
+ \
+ notproduct(bool, G1EvacuationFailureALot, false, \
+ "Force use of evacuation failure handling during certain " \
+ "evacuation pauses") \
+ \
+ develop(uintx, G1EvacuationFailureALotCount, 1000, \
+ "Number of successful evacuations between evacuation failures " \
+ "occurring at object copying") \
+ \
+ develop(uintx, G1EvacuationFailureALotInterval, 5, \
+ "Total collections between forced triggering of evacuation " \
+ "failures") \
+ \
+ develop(bool, G1EvacuationFailureALotDuringConcMark, true, \
+ "Force use of evacuation failure handling during evacuation " \
+ "pauses when marking is in progress") \
+ \
+ develop(bool, G1EvacuationFailureALotDuringInitialMark, true, \
+ "Force use of evacuation failure handling during initial mark " \
+ "evacuation pauses") \
+ \
+ develop(bool, G1EvacuationFailureALotDuringYoungGC, true, \
+ "Force use of evacuation failure handling during young " \
+ "evacuation pauses") \
+ \
+ develop(bool, G1EvacuationFailureALotDuringMixedGC, true, \
+ "Force use of evacuation failure handling during mixed " \
+ "evacuation pauses")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
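All of the G1EvacuationFailureALot* knobs are notproduct/develop flags, so they can only be changed in non-product (debug/fastdebug) builds of the VM. A hedged example that enables the feature with a tightened interval and count, leaving the per-pause-type defaults alone, would be along the lines of: -XX:+UseG1GC -XX:+G1EvacuationFailureALot -XX:G1EvacuationFailureALotInterval=1 -XX:G1EvacuationFailureALotCount=100.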
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -424,6 +424,8 @@
|| code == _fconst_0 || code == _dconst_0); }
static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); }
+ static bool has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; }
+
static int compute_flags (const char* format, int more_flags = 0); // compute the flags
static int flags (int code, bool is_wide) {
assert(code == (u_char)code, "must be a byte");
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -353,27 +353,9 @@
develop(bool, StressRecompilation, false, \
"Recompile each compiled method without subsuming loads or escape analysis.") \
\
- /* controls for tier 1 compilations */ \
- \
- develop(bool, Tier1CountInvocations, true, \
- "Generate code, during tier 1, to update invocation counter") \
- \
- product(intx, Tier1Inline, false, \
- "enable inlining during tier 1") \
- \
- product(intx, Tier1MaxInlineSize, 8, \
- "maximum bytecode size of a method to be inlined, during tier 1") \
- \
- product(intx, Tier1FreqInlineSize, 35, \
- "max bytecode size of a frequent method to be inlined, tier 1") \
- \
develop(intx, ImplicitNullCheckThreshold, 3, \
"Don't do implicit null checks if NPE's in a method exceeds limit") \
\
- /* controls for loop optimization */ \
- product(intx, Tier1LoopOptsCount, 0, \
- "Set level of loop optimization for tier 1 compiles") \
- \
product(intx, LoopOptsCount, 43, \
"Set level of loop optimization for tier 1 compiles") \
\
@@ -505,6 +487,116 @@
\
product(bool, BlockLayoutRotateLoops, true, \
"Allow back branches to be fall throughs in the block layour") \
+ \
+ develop(bool, InlineReflectionGetCallerClass, true, \
+ "inline sun.reflect.Reflection.getCallerClass(), known to be part "\
+ "of base library DLL") \
+ \
+ develop(bool, InlineObjectCopy, true, \
+ "inline Object.clone and Arrays.copyOf[Range] intrinsics") \
+ \
+ develop(bool, SpecialStringCompareTo, true, \
+ "special version of string compareTo") \
+ \
+ develop(bool, SpecialStringIndexOf, true, \
+ "special version of string indexOf") \
+ \
+ develop(bool, SpecialStringEquals, true, \
+ "special version of string equals") \
+ \
+ develop(bool, SpecialArraysEquals, true, \
+ "special version of Arrays.equals(char[],char[])") \
+ \
+ develop(bool, BailoutToInterpreterForThrows, false, \
+ "Compiled methods which throws/catches exceptions will be " \
+ "deopt and intp.") \
+ \
+ develop(bool, ConvertCmpD2CmpF, true, \
+ "Convert cmpD to cmpF when one input is constant in float range") \
+ \
+ develop(bool, ConvertFloat2IntClipping, true, \
+ "Convert float2int clipping idiom to integer clipping") \
+ \
+ develop(bool, Use24BitFPMode, true, \
+ "Set 24-bit FPU mode on a per-compile basis ") \
+ \
+ develop(bool, Use24BitFP, true, \
+ "use FP instructions that produce 24-bit precise results") \
+ \
+ develop(bool, MonomorphicArrayCheck, true, \
+ "Uncommon-trap array store checks that require full type check") \
+ \
+ notproduct(bool, TracePhaseCCP, false, \
+ "Print progress during Conditional Constant Propagation") \
+ \
+ develop(bool, PrintDominators, false, \
+ "Print out dominator trees for GVN") \
+ \
+ notproduct(bool, TraceSpilling, false, \
+ "Trace spilling") \
+ \
+ notproduct(bool, TraceTypeProfile, false, \
+ "Trace type profile") \
+ \
+ develop(bool, PoisonOSREntry, true, \
+ "Detect abnormal calls to OSR code") \
+ \
+ product(bool, UseCondCardMark, false, \
+ "Check for already marked card before updating card table") \
+ \
+ develop(bool, SoftMatchFailure, trueInProduct, \
+ "If the DFA fails to match a node, print a message and bail out") \
+ \
+ develop(bool, InlineAccessors, true, \
+ "inline accessor methods (get/set)") \
+ \
+ product(intx, TypeProfileMajorReceiverPercent, 90, \
+ "% of major receiver type to all profiled receivers") \
+ \
+ notproduct(bool, TimeCompiler2, false, \
+ "detailed time the compiler (requires +TimeCompiler)") \
+ \
+ diagnostic(bool, PrintIntrinsics, false, \
+ "prints attempted and successful inlining of intrinsics") \
+ \
+ diagnostic(ccstrlist, DisableIntrinsic, "", \
+ "do not expand intrinsics whose (internal) names appear here") \
+ \
+ develop(bool, StressReflectiveCode, false, \
+ "Use inexact types at allocations, etc., to test reflection") \
+ \
+ diagnostic(bool, DebugInlinedCalls, true, \
+ "If false, restricts profiled locations to the root method only") \
+ \
+ notproduct(bool, VerifyLoopOptimizations, false, \
+ "verify major loop optimizations") \
+ \
+ diagnostic(bool, ProfileDynamicTypes, true, \
+ "do extra type profiling and use it more aggressively") \
+ \
+ develop(bool, TraceIterativeGVN, false, \
+ "Print progress during Iterative Global Value Numbering") \
+ \
+ develop(bool, VerifyIterativeGVN, false, \
+ "Verify Def-Use modifications during sparse Iterative Global " \
+ "Value Numbering") \
+ \
+ notproduct(bool, TraceCISCSpill, false, \
+ "Trace allocators use of cisc spillable instructions") \
+ \
+ product(bool, SplitIfBlocks, true, \
+ "Clone compares and control flow through merge points to fold " \
+ "some branches") \
+ \
+ develop(intx, FreqCountInvocations, 1, \
+ "Scaling factor for branch frequencies (deprecated)") \
+ \
+ product(intx, AliasLevel, 3, \
+ "0 for no aliasing, 1 for oop/field/static/array split, " \
+ "2 for class split, 3 for unique instances") \
+ \
+ develop(bool, VerifyAliases, false, \
+ "perform extra checks on the results of alias analysis") \
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/opto/chaitin.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -222,6 +222,7 @@
_alternate = 0;
_matcher._allocation_started = true;
+ ResourceArea split_arena; // Arena for Split local resources
ResourceArea live_arena; // Arena for liveness & IFG info
ResourceMark rm(&live_arena);
@@ -324,7 +325,7 @@
// Bail out if unique gets too large (ie - unique > MaxNodeLimit)
C->check_node_count(10*must_spill, "out of nodes before split");
if (C->failing()) return;
- _maxlrg = Split( _maxlrg ); // Split spilling LRG everywhere
+ _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere
// Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
// or we failed to split
C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
@@ -390,7 +391,7 @@
}
if( !_maxlrg ) return;
- _maxlrg = Split( _maxlrg ); // Split spilling LRG everywhere
+ _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere
// Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
if (C->failing()) return;
--- a/hotspot/src/share/vm/opto/chaitin.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -470,7 +470,7 @@
// Split uncolorable live ranges
// Return new number of live ranges
- uint Split( uint maxlrg );
+ uint Split(uint maxlrg, ResourceArea* split_arena);
// Copy 'was_spilled'-edness from one Node to another.
void copy_was_spilled( Node *src, Node *dst );
--- a/hotspot/src/share/vm/opto/doCall.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -341,25 +341,26 @@
kill_dead_locals();
// Set frequently used booleans
- bool is_virtual = bc() == Bytecodes::_invokevirtual;
- bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
- bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
- bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
+ const bool is_virtual = bc() == Bytecodes::_invokevirtual;
+ const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
+ const bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
// Find target being called
bool will_link;
- ciMethod* bc_callee = iter().get_method(will_link); // actual callee from bytecode
- ciInstanceKlass* holder_klass = bc_callee->holder();
- ciKlass* holder = iter().get_declared_method_holder();
+ ciSignature* declared_signature = NULL;
+ ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode
+ ciInstanceKlass* holder_klass = orig_callee->holder();
+ ciKlass* holder = iter().get_declared_method_holder();
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
+ assert(declared_signature != NULL, "cannot be null");
// uncommon-trap when callee is unloaded, uninitialized or will not link
// bailout when too many arguments for register representation
- if (!will_link || can_not_compile_call_site(bc_callee, klass)) {
+ if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
if (PrintOpto && (Verbose || WizardMode)) {
method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
- bc_callee->print_name(); tty->cr();
+ orig_callee->print_name(); tty->cr();
}
#endif
return;
@@ -372,7 +373,7 @@
// Note: In the absence of miranda methods, an abstract class K can perform
// an invokevirtual directly on an interface method I.m if K implements I.
- const int nargs = bc_callee->arg_size();
+ const int nargs = orig_callee->arg_size();
// Push appendix argument (MethodType, CallSite, etc.), if one.
if (iter().has_appendix()) {
@@ -392,13 +393,13 @@
// Choose call strategy.
bool call_is_virtual = is_virtual_or_interface;
int vtable_index = methodOopDesc::invalid_vtable_index;
- ciMethod* callee = bc_callee;
+ ciMethod* callee = orig_callee;
// Try to get the most accurate receiver type
if (is_virtual_or_interface) {
Node* receiver_node = stack(sp() - nargs);
const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
- ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type);
+ ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, orig_callee, receiver_type);
// Has the call been sufficiently improved such that it is no longer a virtual?
if (optimized_virtual_method != NULL) {
@@ -425,7 +426,8 @@
// It decides whether inlining is desirable or not.
CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
- bc_callee = callee = NULL; // don't use bc_callee and callee after this point
+ // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
+ orig_callee = callee = NULL;
// ---------------------
// Round double arguments before call
@@ -497,9 +499,9 @@
round_double_result(cg->method());
ciType* rtype = cg->method()->return_type();
- if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) {
+ if (Bytecodes::has_optional_appendix(iter().cur_bc_raw())) {
// Be careful here with return types.
- ciType* ctype = iter().get_declared_method_signature()->return_type();
+ ciType* ctype = declared_signature->return_type();
if (ctype != rtype) {
BasicType rt = rtype->basic_type();
BasicType ct = ctype->basic_type();
@@ -528,15 +530,13 @@
} else if (rt == T_OBJECT || rt == T_ARRAY) {
assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
if (ctype->is_loaded()) {
- Node* if_fail = top();
- retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail);
- if (if_fail != top()) {
- PreserveJVMState pjvms(this);
- set_control(if_fail);
- builtin_throw(Deoptimization::Reason_class_check);
+ const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
+ const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
+ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
+ Node* cast_obj = _gvn.transform(new (C, 2) CheckCastPPNode(control(), retnode, sig_type));
+ pop();
+ push(cast_obj);
}
- pop();
- push(retnode);
}
} else {
assert(ct == rt, err_msg_res("unexpected mismatch rt=%d, ct=%d", rt, ct));
--- a/hotspot/src/share/vm/opto/graphKit.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -1006,11 +1006,11 @@
case Bytecodes::_putfield:
{
bool is_get = (depth >= 0), is_static = (depth & 1);
- bool ignore;
ciBytecodeStream iter(method());
iter.reset_to_bci(bci());
iter.next();
- ciField* field = iter.get_field(ignore);
+ bool ignored_will_link;
+ ciField* field = iter.get_field(ignored_will_link);
int size = field->type()->size();
inputs = (is_static ? 0 : 1);
if (is_get) {
@@ -1028,11 +1028,13 @@
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{
- bool ignore;
ciBytecodeStream iter(method());
iter.reset_to_bci(bci());
iter.next();
- ciMethod* callee = iter.get_method(ignore);
+ bool ignored_will_link;
+ ciSignature* declared_signature = NULL;
+ ciMethod* callee = iter.get_method(ignored_will_link, &declared_signature);
+ assert(declared_signature != NULL, "cannot be null");
// (Do not use ciMethod::arg_size(), because
// it might be an unloaded method, which doesn't
// know whether it is static or not.)
@@ -1046,7 +1048,7 @@
// remove any appendix arguments that were popped.
inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0);
}
- int size = callee->return_type()->size();
+ int size = declared_signature->return_type()->size();
depth = size - inputs;
}
break;
--- a/hotspot/src/share/vm/opto/loopTransform.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -547,11 +547,6 @@
Node *nnn = old_new[old->_idx];
if (!has_ctrl(nnn))
set_idom(nnn, idom(nnn), dd-1);
- // While we're at it, remove any SafePoints from the peeled code
- if (old->Opcode() == Op_SafePoint) {
- Node *nnn = old_new[old->_idx];
- lazy_replace(nnn,nnn->in(TypeFunc::Control));
- }
}
// Now force out all loop-invariant dominating tests. The optimizer
--- a/hotspot/src/share/vm/opto/loopnode.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -577,6 +577,9 @@
Node *sfpt = x->in(LoopNode::LoopBackControl);
if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
lazy_replace( sfpt, iftrue );
+ if (loop->_safepts != NULL) {
+ loop->_safepts->yank(sfpt);
+ }
loop->_tail = iftrue;
}
@@ -668,8 +671,12 @@
// Check for immediately preceding SafePoint and remove
Node *sfpt2 = le->in(0);
- if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2))
+ if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) {
lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
+ if (loop->_safepts != NULL) {
+ loop->_safepts->yank(sfpt2);
+ }
+ }
// Free up intermediate goo
_igvn.remove_dead_node(hook);
@@ -1526,10 +1533,8 @@
void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
// Bottom up traversal
IdealLoopTree* ch = _child;
- while (ch != NULL) {
- ch->check_safepts(visited, stack);
- ch = ch->_next;
- }
+ if (_child) _child->check_safepts(visited, stack);
+ if (_next) _next ->check_safepts(visited, stack);
if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) {
bool has_call = false; // call on dom-path
@@ -1702,29 +1707,39 @@
phase->is_counted_loop(_head, this)) {
_has_sfpt = 1; // Indicate we do not need a safepoint here
- // Look for a safepoint to remove
- for (Node* n = tail(); n != _head; n = phase->idom(n))
- if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this &&
- phase->is_deleteable_safept(n))
- phase->lazy_replace(n,n->in(TypeFunc::Control));
+ // Look for safepoints to remove.
+ Node_List* sfpts = _safepts;
+ if (sfpts != NULL) {
+ for (uint i = 0; i < sfpts->size(); i++) {
+ Node* n = sfpts->at(i);
+ assert(phase->get_loop(n) == this, "");
+ if (phase->is_deleteable_safept(n)) {
+ phase->lazy_replace(n, n->in(TypeFunc::Control));
+ }
+ }
+ }
// Look for induction variables
phase->replace_parallel_iv(this);
} else if (_parent != NULL && !_irreducible) {
// Not a counted loop.
- // Look for a safepoint on the idom-path to remove, preserving the first one
- bool found = false;
- Node* n = tail();
- for (; n != _head && !found; n = phase->idom(n)) {
- if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this)
- found = true; // Found one
+ // Look for a safepoint on the idom-path.
+ Node* sfpt = tail();
+ for (; sfpt != _head; sfpt = phase->idom(sfpt)) {
+ if (sfpt->Opcode() == Op_SafePoint && phase->get_loop(sfpt) == this)
+ break; // Found one
}
- // Skip past it and delete the others
- for (; n != _head; n = phase->idom(n)) {
- if (n->Opcode() == Op_SafePoint && phase->get_loop(n) == this &&
- phase->is_deleteable_safept(n))
- phase->lazy_replace(n,n->in(TypeFunc::Control));
+ // Delete other safepoints in this loop.
+ Node_List* sfpts = _safepts;
+ if (sfpts != NULL && sfpt != _head && sfpt->Opcode() == Op_SafePoint) {
+ for (uint i = 0; i < sfpts->size(); i++) {
+ Node* n = sfpts->at(i);
+ assert(phase->get_loop(n) == this, "");
+ if (n != sfpt && phase->is_deleteable_safept(n)) {
+ phase->lazy_replace(n, n->in(TypeFunc::Control));
+ }
+ }
}
}
@@ -2766,6 +2781,10 @@
// if the allocation is not eliminated for some reason.
innermost->_allow_optimizations = false;
innermost->_has_call = 1; // = true
+ } else if (n->Opcode() == Op_SafePoint) {
+ // Record all safepoints in this loop.
+ if (innermost->_safepts == NULL) innermost->_safepts = new Node_List();
+ innermost->_safepts->push(n);
}
}
}
@@ -2816,6 +2835,9 @@
is_deleteable_safept(n)) {
Node *in = n->in(TypeFunc::Control);
lazy_replace(n,in); // Pull safepoint now
+ if (ilt->_safepts != NULL) {
+ ilt->_safepts->yank(n);
+ }
// Carry on with the recursion "as if" we are walking
// only the control input
if( !visited.test_set( in->_idx ) ) {
--- a/hotspot/src/share/vm/opto/loopnode.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -336,6 +336,7 @@
_has_sfpt:1, // True if has non-call safepoint
_rce_candidate:1; // True if candidate for range check elimination
+ Node_List* _safepts; // List of safepoints in this loop
Node_List* _required_safept; // An inner loop cannot delete these safepts;
bool _allow_optimizations; // Allow loop optimizations
@@ -343,6 +344,7 @@
: _parent(0), _next(0), _child(0),
_head(head), _tail(tail),
_phase(phase),
+ _safepts(NULL),
_required_safept(NULL),
_allow_optimizations(true),
_nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
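
The loopnode.cpp and loopnode.hpp hunks above stop re-walking the idom path to find safepoints: each loop now carries a _safepts list that is filled while the loop tree is built, entries are yanked whenever lazy_replace deletes a safepoint, and later passes simply scan the cached list. The standalone C++ sketch below only illustrates that record/yank/scan bookkeeping under assumed, simplified types (Node, LoopInfo); it is not the HotSpot data structures.

#include <algorithm>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Stand-in for a compiler node; only the fields this sketch needs.
struct Node { int id; bool is_safepoint; bool deleted; };

// Per-loop bookkeeping: instead of re-walking the dominator path to find
// safepoints, the loop keeps the list built during construction and removes
// ("yanks") entries whenever a safepoint node is deleted from the graph.
struct LoopInfo {
  std::vector<Node*> safepts;
  void record(Node* n) { if (n->is_safepoint) safepts.push_back(n); }
  void yank(Node* n) {
    safepts.erase(std::remove(safepts.begin(), safepts.end(), n), safepts.end());
  }
};

static void remove_safepoint(LoopInfo& loop, Node* n) {
  n->deleted = true;   // the real pass would splice the node out of the graph
  loop.yank(n);        // keep the cached list consistent with the graph
}

int main() {
  Node a{1, true, false}, b{2, false, false}, c{3, true, false};
  LoopInfo loop;
  for (Node* n : {&a, &b, &c}) loop.record(n);   // build phase: remember safepoints

  remove_safepoint(loop, &a);                    // a later pass deletes one safepoint

  for (Node* n : loop.safepts)                   // consumers scan the cached list
    std::printf("remaining safepoint: node %d\n", n->id);
  return 0;
}
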
--- a/hotspot/src/share/vm/opto/reg_split.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -449,9 +449,12 @@
// USES: If USE is in HRP, split at use to leave main LRG on stack.
// Else, hoist LRG back up to register only (ie - split is also DEF)
// We will compute a new maxlrg as we go
-uint PhaseChaitin::Split( uint maxlrg ) {
+uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )
+ // Free thread local resources used by this method on exit.
+ ResourceMark rm(split_arena);
+
uint bidx, pidx, slidx, insidx, inpidx, twoidx;
uint non_phi = 1, spill_cnt = 0;
Node **Reachblock;
@@ -461,14 +464,17 @@
bool u1, u2, u3;
Block *b, *pred;
PhiNode *phi;
- GrowableArray<uint> lidxs;
+ GrowableArray<uint> lidxs(split_arena, _maxlrg, 0, 0);
// Array of counters to count splits per live range
- GrowableArray<uint> splits;
+ GrowableArray<uint> splits(split_arena, _maxlrg, 0, 0);
+
+#define NEW_SPLIT_ARRAY(type, size)\
+ (type*) split_arena->allocate_bytes((size) * sizeof(type))
//----------Setup Code----------
// Create a convenient mapping from lrg numbers to reaches/leaves indices
- uint *lrg2reach = NEW_RESOURCE_ARRAY( uint, _maxlrg );
+ uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg );
// Keep track of DEFS & Phis for later passes
defs = new Node_List();
phis = new Node_List();
@@ -500,15 +506,15 @@
// a Def is UP or DOWN. UP means that it should get a register (ie -
// it is always in LRP regions), and DOWN means that it is probably
// on the stack (ie - it crosses HRP regions).
- Node ***Reaches = NEW_RESOURCE_ARRAY( Node**, _cfg._num_blocks+1 );
- bool **UP = NEW_RESOURCE_ARRAY( bool*, _cfg._num_blocks+1 );
- Node **debug_defs = NEW_RESOURCE_ARRAY( Node*, spill_cnt );
- VectorSet **UP_entry= NEW_RESOURCE_ARRAY( VectorSet*, spill_cnt );
+ Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 );
+ bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 );
+ Node **debug_defs = NEW_SPLIT_ARRAY( Node*, spill_cnt );
+ VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );
// Initialize Reaches & UP
for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) {
- Reaches[bidx] = NEW_RESOURCE_ARRAY( Node*, spill_cnt );
- UP[bidx] = NEW_RESOURCE_ARRAY( bool, spill_cnt );
+ Reaches[bidx] = NEW_SPLIT_ARRAY( Node*, spill_cnt );
+ UP[bidx] = NEW_SPLIT_ARRAY( bool, spill_cnt );
Node **Reachblock = Reaches[bidx];
bool *UPblock = UP[bidx];
for( slidx = 0; slidx < spill_cnt; slidx++ ) {
@@ -517,9 +523,11 @@
}
}
+#undef NEW_SPLIT_ARRAY
+
// Initialize to array of empty vectorsets
for( slidx = 0; slidx < spill_cnt; slidx++ )
- UP_entry[slidx] = new VectorSet(Thread::current()->resource_area());
+ UP_entry[slidx] = new VectorSet(split_arena);
//----------PASS 1----------
//----------Propagation & Node Insertion Code----------
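
The reg_split.cpp hunk above, together with the chaitin.cpp/chaitin.hpp changes, gives Split a caller-owned split_arena: its scratch tables (lrg2reach, Reaches, UP, debug_defs, UP_entry, the GrowableArrays) are carved out of that arena, and the ResourceMark placed at the top of Split releases them as soon as the method returns instead of letting them pile up in the thread's resource area. The standalone C++ sketch below approximates that arena-plus-mark pattern with hypothetical names (Arena, ArenaMark, heavy_pass); it is not HotSpot's ResourceArea/ResourceMark, just an illustration of the idea.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal bump arena: memory is reclaimed in bulk by rolling back to a mark.
class Arena {
  std::vector<std::uint8_t> _buf;
  std::size_t _top;
public:
  explicit Arena(std::size_t capacity) : _buf(capacity), _top(0) {}
  void* allocate_bytes(std::size_t n) {
    std::size_t aligned = (_top + 15) & ~std::size_t(15);   // 16-byte alignment
    if (aligned + n > _buf.size()) return nullptr;          // sketch: no growth
    _top = aligned + n;
    return _buf.data() + aligned;
  }
  std::size_t top() const { return _top; }
  void rollback(std::size_t pos) { _top = pos; }
};

// RAII mark: everything allocated after construction is released on scope exit.
class ArenaMark {
  Arena& _arena;
  std::size_t _saved;
public:
  explicit ArenaMark(Arena& a) : _arena(a), _saved(a.top()) {}
  ~ArenaMark() { _arena.rollback(_saved); }
};

// A pass that needs large scratch tables takes the arena from its caller, so
// the scratch memory is released the moment the pass returns.
static std::size_t heavy_pass(Arena& scratch_arena, std::size_t nblocks, std::size_t spill_cnt) {
  ArenaMark mark(scratch_arena);   // frees all pass-local allocations on return
  // Assumes the arena capacity is large enough; no out-of-memory handling here.
  int** reaches = static_cast<int**>(scratch_arena.allocate_bytes((nblocks + 1) * sizeof(int*)));
  for (std::size_t b = 0; b <= nblocks; ++b) {
    reaches[b] = static_cast<int*>(scratch_arena.allocate_bytes(spill_cnt * sizeof(int)));
    for (std::size_t s = 0; s < spill_cnt; ++s) reaches[b][s] = 0;
  }
  return scratch_arena.top();      // high-water mark while the pass is running
}

int main() {
  Arena split_arena(1 << 20);      // caller owns the scratch arena
  std::size_t during = heavy_pass(split_arena, 64, 32);
  std::printf("bytes used during pass: %zu, after: %zu\n", during, split_arena.top());
  return 0;
}
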
--- a/hotspot/src/share/vm/opto/runtime.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -880,46 +880,6 @@
}
JRT_END
-//-----------------------------------------------------------------------------
-// implicit exception support.
-
-static void report_null_exception_in_code_cache(address exception_pc) {
- ResourceMark rm;
- CodeBlob* n = CodeCache::find_blob(exception_pc);
- if (n != NULL) {
- tty->print_cr("#");
- tty->print_cr("# HotSpot Runtime Error, null exception in generated code");
- tty->print_cr("#");
- tty->print_cr("# pc where exception happened = " INTPTR_FORMAT, exception_pc);
-
- if (n->is_nmethod()) {
- methodOop method = ((nmethod*)n)->method();
- tty->print_cr("# Method where it happened %s.%s ", Klass::cast(method->method_holder())->name()->as_C_string(), method->name()->as_C_string());
- tty->print_cr("#");
- if (ShowMessageBoxOnError && UpdateHotSpotCompilerFileOnError &&
- CompilerOracle::has_command_file()) {
- const char* title = "HotSpot Runtime Error";
- const char* question = "Do you want to exclude compilation of this method in future runs?";
- if (os::message_box(title, question)) {
- CompilerOracle::append_comment_to_file("");
- CompilerOracle::append_comment_to_file("Null exception in compiled code resulted in the following exclude");
- CompilerOracle::append_comment_to_file("");
- CompilerOracle::append_exclude_to_file(method);
- tty->print_cr("#");
- tty->print_cr("# %s has been updated to exclude the specified method", CompileCommandFile);
- tty->print_cr("#");
- }
- }
- fatal("Implicit null exception happened in compiled method");
- } else {
- n->print();
- fatal("Implicit null exception happened in generated stub");
- }
- }
- fatal("Implicit null exception at wrong place");
-}
-
-
//-------------------------------------------------------------------------------------
// register policy
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -1916,7 +1916,7 @@
(ExplicitGCInvokesConcurrent ||
ExplicitGCInvokesConcurrentAndUnloadsClasses)) {
jio_fprintf(defaultStream::error_stream(),
- "error: +ExplictGCInvokesConcurrent[AndUnloadsClasses] conflicts"
+ "error: +ExplicitGCInvokesConcurrent[AndUnloadsClasses] conflicts"
" with -UseAsyncConcMarkSweepGC");
status = false;
}
--- a/hotspot/src/share/vm/runtime/globals.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -43,7 +43,6 @@
#include "shark/shark_globals.hpp"
#endif
-
RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \
@@ -55,6 +54,10 @@
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+ARCH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, \
+ MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \
+ MATERIALIZE_NOTPRODUCT_FLAG)
+
MATERIALIZE_FLAGS_EXT
@@ -212,7 +215,6 @@
#define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
#endif
-
#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
@@ -227,6 +229,17 @@
#define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2 notproduct}", DEFAULT },
#endif
+#define ARCH_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH product}", DEFAULT },
+#define ARCH_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH diagnostic}", DEFAULT },
+#define ARCH_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH experimental}", DEFAULT },
+#ifdef PRODUCT
+ #define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+ #define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+ #define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH}", DEFAULT },
+ #define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH notproduct}", DEFAULT },
+#endif
+
#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark product}", DEFAULT },
#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark pd product}", DEFAULT },
#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark diagnostic}", DEFAULT },
@@ -255,6 +268,7 @@
#ifdef SHARK
SHARK_FLAGS(SHARK_DEVELOP_FLAG_STRUCT, SHARK_PD_DEVELOP_FLAG_STRUCT, SHARK_PRODUCT_FLAG_STRUCT, SHARK_PD_PRODUCT_FLAG_STRUCT, SHARK_DIAGNOSTIC_FLAG_STRUCT, SHARK_NOTPRODUCT_FLAG_STRUCT)
#endif
+ ARCH_FLAGS(ARCH_DEVELOP_FLAG_STRUCT, ARCH_PRODUCT_FLAG_STRUCT, ARCH_DIAGNOSTIC_FLAG_STRUCT, ARCH_EXPERIMENTAL_FLAG_STRUCT, ARCH_NOTPRODUCT_FLAG_STRUCT)
FLAGTABLE_EXT
{0, NULL, NULL}
};
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -530,12 +530,6 @@
product(intx, UseSSE, 99, \
"Highest supported SSE instructions set on x86/x64") \
\
- product(intx, UseAVX, 99, \
- "Highest supported AVX instructions set on x86/x64") \
- \
- product(intx, UseVIS, 99, \
- "Highest supported VIS instructions set on Sparc") \
- \
product(uintx, LargePageSizeInBytes, 0, \
"Large page size (0 to let VM choose the page size") \
\
@@ -572,10 +566,6 @@
product(bool, PrintVMQWaitTime, false, \
"Prints out the waiting time in VM operation queue") \
\
- develop(bool, BailoutToInterpreterForThrows, false, \
- "Compiled methods which throws/catches exceptions will be " \
- "deopt and intp.") \
- \
develop(bool, NoYieldsInMicrolock, false, \
"Disable yields in microlock") \
\
@@ -618,9 +608,6 @@
"inline Object::hashCode() native that is known to be part " \
"of base library DLL") \
\
- develop(bool, InlineObjectCopy, true, \
- "inline Object.clone and Arrays.copyOf[Range] intrinsics") \
- \
develop(bool, InlineNatives, true, \
"inline natives that are known to be part of base library DLL") \
\
@@ -633,31 +620,9 @@
develop(bool, InlineThreadNatives, true, \
"inline Thread.currentThread, etc") \
\
- develop(bool, InlineReflectionGetCallerClass, true, \
- "inline sun.reflect.Reflection.getCallerClass(), known to be part "\
- "of base library DLL") \
- \
develop(bool, InlineUnsafeOps, true, \
"inline memory ops (native methods) from sun.misc.Unsafe") \
\
- develop(bool, ConvertCmpD2CmpF, true, \
- "Convert cmpD to cmpF when one input is constant in float range") \
- \
- develop(bool, ConvertFloat2IntClipping, true, \
- "Convert float2int clipping idiom to integer clipping") \
- \
- develop(bool, SpecialStringCompareTo, true, \
- "special version of string compareTo") \
- \
- develop(bool, SpecialStringIndexOf, true, \
- "special version of string indexOf") \
- \
- develop(bool, SpecialStringEquals, true, \
- "special version of string equals") \
- \
- develop(bool, SpecialArraysEquals, true, \
- "special version of Arrays.equals(char[],char[])") \
- \
product(bool, CriticalJNINatives, true, \
"check for critical JNI entry points") \
\
@@ -667,9 +632,6 @@
product(bool, UseSSE42Intrinsics, false, \
"SSE4.2 versions of intrinsics") \
\
- product(bool, UseCondCardMark, false, \
- "Check for already marked card before updating card table") \
- \
develop(bool, TraceCallFixup, false, \
"traces all call fixups") \
\
@@ -756,9 +718,6 @@
develop(bool, ForceFloatExceptions, trueInDebug, \
"Force exceptions on FP stack under/overflow") \
\
- develop(bool, SoftMatchFailure, trueInProduct, \
- "If the DFA fails to match a node, print a message and bail out") \
- \
develop(bool, VerifyStackAtCalls, false, \
"Verify that the stack pointer is unchanged after calls") \
\
@@ -915,15 +874,6 @@
"1: allow scavenging from the code cache; " \
"2: emit as many constants as the compiler can see") \
\
- diagnostic(bool, TraceOSRBreakpoint, false, \
- "Trace OSR Breakpoint ") \
- \
- diagnostic(bool, TraceCompileTriggered, false, \
- "Trace compile triggered") \
- \
- diagnostic(bool, TraceTriggers, false, \
- "Trace triggers") \
- \
product(bool, AlwaysRestoreFPU, false, \
"Restore the FPU control word after every JNI call (expensive)") \
\
@@ -1037,9 +987,6 @@
develop(bool, UsePrivilegedStack, true, \
"Enable the security JVM functions") \
\
- develop(bool, IEEEPrecision, true, \
- "Enables IEEE precision (for INTEL only)") \
- \
develop(bool, ProtectionDomainVerification, true, \
"Verifies protection domain before resolution in system " \
"dictionary") \
@@ -1109,8 +1056,6 @@
"(Unsafe,Unstable) " \
" Controls emission of inline sync fast-path code") \
\
- product(intx, AlwaysInflate, 0, "(Unstable) Force inflation") \
- \
product(intx, MonitorBound, 0, "Bound Monitor population") \
\
product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \
@@ -1118,9 +1063,6 @@
product(intx, Atomics, 0, \
"(Unsafe,Unstable) Diagnostic - Controls emission of atomics") \
\
- product(intx, FenceInstruction, 0, \
- "(Unsafe,Unstable) Experimental") \
- \
product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \
\
product(intx, SyncVerbose, 0, "(Unstable)" ) \
@@ -1150,10 +1092,6 @@
"call thr_setconcurrency at thread create time to avoid " \
"LWP starvation on MP systems (For Solaris Only)") \
\
- develop(bool, UpdateHotSpotCompilerFileOnError, true, \
- "Should the system attempt to update the compiler file when " \
- "an error occurs?") \
- \
product(bool, ReduceSignalUsage, false, \
"Reduce the use of OS signals in Java and/or the VM") \
\
@@ -1188,15 +1126,6 @@
"Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM " \
"internal signals (Solaris only)") \
\
- product(bool, UseSpinning, false, \
- "Use spinning in monitor inflation and before entry") \
- \
- product(bool, PreSpinYield, false, \
- "Yield before inner spinning loop") \
- \
- product(bool, PostSpinYield, true, \
- "Yield after inner spinning loop") \
- \
product(bool, AllowJNIEnvProxy, false, \
"Allow JNIEnv proxies for jdbx") \
\
@@ -1225,39 +1154,9 @@
product(bool, LazyBootClassLoader, true, \
"Enable/disable lazy opening of boot class path entries") \
\
- diagnostic(bool, UseIncDec, true, \
- "Use INC, DEC instructions on x86") \
- \
- product(bool, UseNewLongLShift, false, \
- "Use optimized bitwise shift left") \
- \
- product(bool, UseStoreImmI16, true, \
- "Use store immediate 16-bits value instruction on x86") \
- \
- product(bool, UseAddressNop, false, \
- "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
- \
- product(bool, UseXmmLoadAndClearUpper, true, \
- "Load low part of XMM register and clear upper part") \
- \
- product(bool, UseXmmRegToRegMoveAll, false, \
- "Copy all XMM register bits when moving value between registers") \
- \
- product(bool, UseXmmI2D, false, \
- "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
- \
- product(bool, UseXmmI2F, false, \
- "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
- \
product(bool, UseXMMForArrayCopy, false, \
"Use SSE2 MOVQ instruction for Arraycopy") \
\
- product(bool, UseUnalignedLoadStores, false, \
- "Use SSE2 MOVDQU instruction for Arraycopy") \
- \
- product(bool, UseCBCond, false, \
- "Use compare and branch instruction on SPARC") \
- \
product(intx, FieldsAllocationStyle, 1, \
"0 - type based with oops first, 1 - with oops last, " \
"2 - oops in super and sub classes are together") \
@@ -1387,9 +1286,6 @@
develop(bool, TraceStartupTime, false, \
"Trace setup time") \
\
- product(ccstr, HPILibPath, NULL, \
- "Specify alternate path to HPI library") \
- \
develop(bool, TraceProtectionDomainVerification, false, \
"Trace protection domain verifcation") \
\
@@ -1405,10 +1301,6 @@
product(bool, TraceMonitorInflation, false, \
"Trace monitor inflation in JVM") \
\
- /* assembler */ \
- product(bool, Use486InstrsOnly, false, \
- "Use 80486 Compliant instruction subset") \
- \
/* gc */ \
\
product(bool, UseSerialGC, false, \
@@ -1467,9 +1359,6 @@
develop(uintx, ParallelOldGCSplitInterval, 3, \
"How often to provoke splitting a young gen space") \
\
- develop(bool, TraceRegionTasksQueuing, false, \
- "Trace the queuing of the region tasks") \
- \
product(uintx, ConcGCThreads, 0, \
"Number of threads concurrent gc will use") \
\
@@ -1621,10 +1510,6 @@
"The gain in the feedback loop for on-the-fly PLAB resizing" \
" during a scavenge") \
\
- product(uintx, CMSOldPLABReactivityCeiling, 10, \
- "The clamping of the gain in the feedback loop for on-the-fly" \
- " PLAB resizing during a scavenge") \
- \
product(bool, AlwaysPreTouch, false, \
"It forces all freshly committed pages to be pre-touched.") \
\
@@ -1632,12 +1517,6 @@
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
- product(bool, GCOverheadReporting, false, \
- "Enables the GC overhead reporting facility") \
- \
- product(intx, GCOverheadReportingPeriodMS, 100, \
- "Reporting period for conc GC overhead reporting, in ms ") \
- \
product(bool, CMSIncrementalMode, false, \
"Whether CMS GC should operate in \"incremental\" mode") \
\
@@ -2017,9 +1896,6 @@
experimental(uintx, WorkStealingSpinToYieldRatio, 10, \
"Ratio of hard spins to calls to yield") \
\
- product(uintx, PreserveMarkStackSize, 1024, \
- "Size for stack used in promotion failure handling") \
- \
develop(uintx, ObjArrayMarkingStride, 512, \
"Number of ObjArray elements to push onto the marking stack" \
"before pushing a continuation entry") \
@@ -2044,18 +1920,6 @@
product(bool, TLABStats, true, \
"Print various TLAB related information") \
\
- product(bool, UseBlockZeroing, false, \
- "Use special cpu instructions for block zeroing") \
- \
- product(intx, BlockZeroingLowLimit, 2048, \
- "Minimum size in bytes when block zeroing will be used") \
- \
- product(bool, UseBlockCopy, false, \
- "Use special cpu instructions for block copy") \
- \
- product(intx, BlockCopyLowLimit, 2048, \
- "Minimum size in bytes when block copy will be used") \
- \
product(bool, PrintRevisitStats, false, \
"Print revisit (klass and MDO) stack related information") \
\
@@ -2248,9 +2112,6 @@
product(intx, PrefetchFieldsAhead, -1, \
"How many fields ahead to prefetch in oop scan (<= 0 means off)") \
\
- develop(bool, UsePrefetchQueue, true, \
- "Use the prefetch queue during PS promotion") \
- \
diagnostic(bool, VerifyBeforeExit, trueInDebug, \
"Verify system before exiting") \
\
@@ -2486,27 +2347,9 @@
develop(bool, CITraceTypeFlow, false, \
"detailed per-bytecode tracing of ciTypeFlow analysis") \
\
- develop(intx, CICloneLoopTestLimit, 100, \
- "size limit for blocks heuristically cloned in ciTypeFlow") \
- \
develop(intx, OSROnlyBCI, -1, \
"OSR only at this bci. Negative values mean exclude that bci") \
\
- /* temp diagnostics */ \
- \
- diagnostic(bool, TraceRedundantCompiles, false, \
- "Have compile broker print when a request already in the queue is"\
- " requested again") \
- \
- diagnostic(bool, InitialCompileFast, false, \
- "Initial compile at CompLevel_fast_compile") \
- \
- diagnostic(bool, InitialCompileReallyFast, false, \
- "Initial compile at CompLevel_really_fast_compile (no profile)") \
- \
- diagnostic(bool, FullProfileOnReInterpret, true, \
- "On re-interpret unc-trap compile next at CompLevel_fast_compile")\
- \
/* compiler */ \
\
product(intx, CICompilerCount, CI_COMPILER_COUNT, \
@@ -2520,12 +2363,6 @@
"proper StackOverflow handling; disable only to measure cost " \
"of stackbanging)") \
\
- develop(bool, Use24BitFPMode, true, \
- "Set 24-bit FPU mode on a per-compile basis ") \
- \
- develop(bool, Use24BitFP, true, \
- "use FP instructions that produce 24-bit precise results") \
- \
develop(bool, UseStrictFP, true, \
"use strict fp if modifier strictfp is set") \
\
@@ -2557,9 +2394,6 @@
"print the break down of clean up tasks performed during" \
" safepoint") \
\
- develop(bool, InlineAccessors, true, \
- "inline accessor methods (get/set)") \
- \
product(bool, Inline, true, \
"enable inlining") \
\
@@ -2572,33 +2406,15 @@
product(bool, UseTypeProfile, true, \
"Check interpreter profile for historically monomorphic calls") \
\
- product(intx, TypeProfileMajorReceiverPercent, 90, \
- "% of major receiver type to all profiled receivers") \
- \
notproduct(bool, TimeCompiler, false, \
"time the compiler") \
\
- notproduct(bool, TimeCompiler2, false, \
- "detailed time the compiler (requires +TimeCompiler)") \
- \
diagnostic(bool, PrintInlining, false, \
"prints inlining optimizations") \
\
- diagnostic(bool, PrintIntrinsics, false, \
- "prints attempted and successful inlining of intrinsics") \
- \
- product(bool, UseCountLeadingZerosInstruction, false, \
- "Use count leading zeros instruction") \
- \
product(bool, UsePopCountInstruction, false, \
"Use population count instruction") \
\
- diagnostic(ccstrlist, DisableIntrinsic, "", \
- "do not expand intrinsics whose (internal) names appear here") \
- \
- develop(bool, StressReflectiveCode, false, \
- "Use inexact types at allocations, etc., to test reflection") \
- \
develop(bool, EagerInitialization, false, \
"Eagerly initialize classes if possible") \
\
@@ -2608,10 +2424,6 @@
develop(bool, PrintMethodFlushing, false, \
"print the nmethods being flushed") \
\
- notproduct(bool, LogMultipleMutexLocking, false, \
- "log locking and unlocking of mutexes (only if multiple locks " \
- "are held)") \
- \
develop(bool, UseRelocIndex, false, \
"use an index to speed random access to relocations") \
\
@@ -2621,9 +2433,6 @@
diagnostic(bool, DebugNonSafepoints, trueInDebug, \
"Generate extra debugging info for non-safepoints in nmethods") \
\
- diagnostic(bool, DebugInlinedCalls, true, \
- "If false, restricts profiled locations to the root method only") \
- \
product(bool, PrintVMOptions, false, \
"Print flags that appeared on the command line") \
\
@@ -2700,9 +2509,6 @@
notproduct(bool, IgnoreLockingAssertions, false, \
"disable locking assertions (for speed)") \
\
- notproduct(bool, VerifyLoopOptimizations, false, \
- "verify major loop optimizations") \
- \
product(bool, RangeCheckElimination, true, \
"Split loop iterations to eliminate range checks") \
\
@@ -2712,12 +2518,6 @@
develop(bool, TypeProfileCasts, true, \
"treat casts like calls for purposes of type profiling") \
\
- develop(bool, MonomorphicArrayCheck, true, \
- "Uncommon-trap array store checks that require full type check") \
- \
- diagnostic(bool, ProfileDynamicTypes, true, \
- "do extra type profiling and use it more aggressively") \
- \
develop(bool, DelayCompilationDuringStartup, true, \
"Delay invoking the compiler until main application class is " \
"loaded") \
@@ -2732,19 +2532,9 @@
notproduct(intx, CompileTheWorldSafepointInterval, 100, \
"Force a safepoint every n compiles so sweeper can keep up") \
\
- develop(bool, TraceIterativeGVN, false, \
- "Print progress during Iterative Global Value Numbering") \
- \
develop(bool, FillDelaySlots, true, \
"Fill delay slots (on SPARC only)") \
\
- develop(bool, VerifyIterativeGVN, false, \
- "Verify Def-Use modifications during sparse Iterative Global " \
- "Value Numbering") \
- \
- notproduct(bool, TracePhaseCCP, false, \
- "Print progress during Conditional Constant Propagation") \
- \
develop(bool, TimeLivenessAnalysis, false, \
"Time computation of bytecode liveness analysis") \
\
@@ -2757,22 +2547,9 @@
notproduct(bool, CollectIndexSetStatistics, false, \
"Collect information about IndexSets") \
\
- develop(bool, PrintDominators, false, \
- "Print out dominator trees for GVN") \
- \
develop(bool, UseLoopSafepoints, true, \
"Generate Safepoint nodes in every loop") \
\
- notproduct(bool, TraceCISCSpill, false, \
- "Trace allocators use of cisc spillable instructions") \
- \
- notproduct(bool, TraceSpilling, false, \
- "Trace spilling") \
- \
- product(bool, SplitIfBlocks, true, \
- "Clone compares and control flow through merge points to fold " \
- "some branches") \
- \
develop(intx, FastAllocateSizeLimit, 128*K, \
/* Note: This value is zero mod 1<<13 for a cheap sparc set. */ \
"Inline allocations larger than this in doublewords must go slow")\
@@ -2829,15 +2606,6 @@
develop(bool, UseFastSignatureHandlers, true, \
"Use fast signature handlers for native calls") \
\
- develop(bool, UseV8InstrsOnly, false, \
- "Use SPARC-V8 Compliant instruction subset") \
- \
- product(bool, UseNiagaraInstrs, false, \
- "Use Niagara-efficient instruction subset") \
- \
- develop(bool, UseCASForSwap, false, \
- "Do not use swap instructions, but only CAS (in a loop) on SPARC")\
- \
product(bool, UseLoopCounter, true, \
"Increment invocation counter on backward branch") \
\
@@ -2854,9 +2622,6 @@
notproduct(bool, TraceOnStackReplacement, false, \
"Trace on stack replacement") \
\
- develop(bool, PoisonOSREntry, true, \
- "Detect abnormal calls to OSR code") \
- \
product_pd(bool, PreferInterpreterNativeStubs, \
"Use always interpreter stubs for native methods invoked via " \
"interpreter") \
@@ -2899,9 +2664,6 @@
develop(bool, TraceFrequencyInlining, false, \
"Trace frequency based inlining") \
\
- notproduct(bool, TraceTypeProfile, false, \
- "Trace type profile") \
- \
develop_pd(bool, InlineIntrinsics, \
"Inline intrinsics that can be statically resolved") \
\
@@ -2989,15 +2751,6 @@
product(intx, AllocatePrefetchInstr, 0, \
"Prefetch instruction to prefetch ahead of allocation pointer") \
\
- product(intx, ReadPrefetchInstr, 0, \
- "Prefetch instruction to prefetch ahead") \
- \
- product(uintx, ArraycopySrcPrefetchDistance, 0, \
- "Distance to prefetch source array in arracopy") \
- \
- product(uintx, ArraycopyDstPrefetchDistance, 0, \
- "Distance to prefetch destination array in arracopy") \
- \
/* deoptimization */ \
develop(bool, TraceDeoptimization, false, \
"Trace deoptimization") \
@@ -3088,9 +2841,6 @@
product(intx, MinInliningThreshold, 250, \
"min. invocation count a method needs to have to be inlined") \
\
- develop(intx, AlignEntryCode, 4, \
- "aligns entry code to specified value (in bytes)") \
- \
develop(intx, MethodHistogramCutoff, 100, \
"cutoff value for method invoc. histogram (+CountCalls)") \
\
@@ -3130,9 +2880,6 @@
"Minimum sleep() interval (milliseconds) when " \
"ConvertSleepToYield is off (used for SOLARIS)") \
\
- product(intx, EventLogLength, 2000, \
- "maximum nof events in event log") \
- \
develop(intx, ProfilerPCTickThreshold, 15, \
"Number of ticks in a PC buckets to be a hotspot") \
\
@@ -3171,9 +2918,6 @@
product(intx, PerBytecodeTrapLimit, 4, \
"Limit on traps (of one kind) at a particular BCI") \
\
- develop(intx, FreqCountInvocations, 1, \
- "Scaling factor for branch frequencies (deprecated)") \
- \
develop(intx, InlineFrequencyRatio, 20, \
"Ratio of call site execution to caller method invocation") \
\
@@ -3187,29 +2931,12 @@
develop(intx, InlineThrowMaxSize, 200, \
"Force inlining of throwing methods smaller than this") \
\
- product(intx, AliasLevel, 3, \
- "0 for no aliasing, 1 for oop/field/static/array split, " \
- "2 for class split, 3 for unique instances") \
- \
- develop(bool, VerifyAliases, false, \
- "perform extra checks on the results of alias analysis") \
- \
develop(intx, ProfilerNodeSize, 1024, \
"Size in K to allocate for the Profile Nodes of each thread") \
\
- develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
- "Number of times to spin wait on a v8 atomic operation lock") \
- \
- product(intx, ReadSpinIterations, 100, \
- "Number of read attempts before a yield (spin inner loop)") \
- \
product_pd(intx, PreInflateSpin, \
"Number of times to spin wait before inflation") \
\
- product(intx, PreBlockSpin, 10, \
- "Number of times to spin in an inflated lock before going to " \
- "an OS lock") \
- \
/* gc parameters */ \
product(uintx, InitialHeapSize, 0, \
"Initial heap size (in bytes); zero means OldSize + NewSize") \
@@ -3466,10 +3193,6 @@
"(non-negative value throws OOM after this many CI accesses " \
"in each compile)") \
\
- develop(intx, CIFireOOMAtDelay, -1, \
- "Wait for this many CI accesses to occur in all compiles before " \
- "beginning to throw OutOfMemoryErrors in each compile") \
- \
notproduct(bool, CIObjectFactoryVerify, false, \
"enable potentially expensive verification in ciObjectFactory") \
\
@@ -3663,9 +3386,6 @@
product(bool, PrintTieredEvents, false, \
"Print tiered events notifications") \
\
- product(bool, StressTieredRuntime, false, \
- "Alternate client and server compiler on compile requests") \
- \
product_pd(intx, OnStackReplacePercentage, \
"NON_TIERED number of method invocations/branches (expressed as %"\
"of CompileThreshold) before (re-)compiling OSR code") \
@@ -3835,9 +3555,6 @@
"support JSR 292 (method handles, invokedynamic, " \
"anonymous classes") \
\
- product(bool, AnonymousClasses, false, \
- "support sun.misc.Unsafe.defineAnonymousClass (deprecated)") \
- \
diagnostic(bool, PrintMethodHandleStubs, false, \
"Print generated stub code for method handles") \
\
@@ -3952,6 +3669,8 @@
RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+ARCH_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
+
// Extensions
#include "runtime/globals_ext.hpp"
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp Fri Aug 31 16:17:40 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,6 @@
#define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#endif
-
#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
@@ -81,6 +80,17 @@
#define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#endif
+#define ARCH_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define ARCH_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define ARCH_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#ifdef PRODUCT
+ #define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
+ #define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+ #define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#endif
+
typedef enum {
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER)
@@ -93,6 +103,7 @@
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
#endif
+ ARCH_FLAGS(ARCH_DEVELOP_FLAG_MEMBER, ARCH_PRODUCT_FLAG_MEMBER, ARCH_DIAGNOSTIC_FLAG_MEMBER, ARCH_EXPERIMENTAL_FLAG_MEMBER, ARCH_NOTPRODUCT_FLAG_MEMBER)
COMMANDLINEFLAG_EXT
NUM_CommandLineFlag
} CommandLineFlag;
@@ -134,7 +145,6 @@
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
#endif // _LP64
-
#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
@@ -149,6 +159,17 @@
#define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#endif
+#define ARCH_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define ARCH_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+ #define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
+ #define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+ #define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
+
typedef enum {
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE,
RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
@@ -193,6 +214,11 @@
C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE,
C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
#endif
+ ARCH_FLAGS(ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+ ARCH_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ ARCH_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+ ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE,
+ ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
COMMANDLINEFLAGWITHTYPE_EXT
NUM_CommandLineFlagWithType
} CommandLineFlagWithType;
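
The globals.cpp and globals_extension.hpp hunks above thread the new ARCH_FLAGS list through the usual flag machinery: one expansion materializes the flag variables, another builds the descriptor table, and two more enumerate the CommandLineFlag members. The short standalone C++ sketch below demonstrates that X-macro technique with made-up names (DEMO_ARCH_FLAGS, MATERIALIZE_*, FLAG_STRUCT); it mirrors the pattern only and is not the actual HotSpot macro set.

#include <cstdio>

// X-macro flag table: the flag list is written once and expanded with
// different macros, mirroring how ARCH_FLAGS is passed different
// materializers above. All names here are illustrative.
#define DEMO_ARCH_FLAGS(product_flag, develop_flag)                        \
  product_flag(bool, UseFancyInstr, false, "Use the fancy instruction")    \
  develop_flag(int,  TraceLevel,    0,     "Internal tracing level")

// Expansion 1: materialize the flag variables.
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_DEVELOP_FLAG(type, name, value, doc) type name = value;
DEMO_ARCH_FLAGS(MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_DEVELOP_FLAG)

// Expansion 2: build a descriptor table over the same list.
struct FlagDesc { const char* type; const char* name; const char* doc; };
#define FLAG_STRUCT(type, name, value, doc) { #type, #name, doc },
static const FlagDesc flag_table[] = {
  DEMO_ARCH_FLAGS(FLAG_STRUCT, FLAG_STRUCT)
  { nullptr, nullptr, nullptr }   // sentinel, like the {0, NULL, NULL} entry above
};

int main() {
  for (const FlagDesc* f = flag_table; f->name != nullptr; ++f)
    std::printf("%-4s %-14s %s\n", f->type, f->name, f->doc);
  return 0;
}
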
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Aug 31 16:17:40 2012 -0700
@@ -2135,6 +2135,7 @@
/******************/ \
\
declare_constant(UseTLAB) \
+ declare_constant(EnableInvokeDynamic) \
\
/**************/ \
/* Stack bias */ \
--- a/hotspot/test/compiler/6894807/Test6894807.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/compiler/6894807/Test6894807.sh Fri Aug 31 16:17:40 2012 -0700
@@ -31,6 +31,11 @@
PS=";"
FS="\\"
;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/gc/6941923/test6941923.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/gc/6941923/test6941923.sh Fri Aug 31 16:17:40 2012 -0700
@@ -14,7 +14,7 @@
PS=":"
FS="/"
;;
- Windows_* )
+ Windows_* | CYGWIN_* )
echo "Test skipped for Windows"
exit 0
;;
--- a/hotspot/test/runtime/6626217/Test6626217.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh Fri Aug 31 16:17:40 2012 -0700
@@ -65,6 +65,14 @@
CP=cp
MV=mv
;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ RM=rm
+ CP=cp
+ MV=mv
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/runtime/6878713/Test6878713.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/6878713/Test6878713.sh Fri Aug 31 16:17:40 2012 -0700
@@ -38,6 +38,11 @@
PS=";"
FS="\\"
;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/runtime/7020373/Test7020373.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/7020373/Test7020373.sh Fri Aug 31 16:17:40 2012 -0700
@@ -40,6 +40,11 @@
PS=";"
FS="\\"
;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/runtime/7051189/Xchecksig.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/7051189/Xchecksig.sh Fri Aug 31 16:17:40 2012 -0700
@@ -46,7 +46,7 @@
SunOS | Linux | Darwin )
FS="/"
;;
- Windows_* )
+ Windows_* | CYGWIN_* )
printf "Not testing libjsig.so on Windows. PASSED.\n "
exit 0
;;
--- a/hotspot/test/runtime/7110720/Test7110720.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/7110720/Test7110720.sh Fri Aug 31 16:17:40 2012 -0700
@@ -55,6 +55,12 @@
CP=cp
MV=mv
;;
+ CYGWIN_* )
+ FS="/"
+ RM=rm
+ CP=cp
+ MV=mv
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/runtime/7158800/Test7158800.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/7158800/Test7158800.sh Fri Aug 31 16:17:40 2012 -0700
@@ -56,6 +56,11 @@
PS=";"
FS="\\"
;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ ;;
* )
echo "Unrecognized system!"
exit 1;
--- a/hotspot/test/runtime/7158988/TestFieldMonitor.sh Thu Aug 30 10:27:07 2012 -0700
+++ b/hotspot/test/runtime/7158988/TestFieldMonitor.sh Fri Aug 31 16:17:40 2012 -0700
@@ -38,6 +38,18 @@
PS=";"
FS="\\"
;;
+ CYGWIN_NT* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ ;;
+ CYGWIN_* )
+ NULL=/dev/null
+ PS=";"
+ FS="/"
+ echo "Test skipped, only for WinNT"
+ exit 0
+ ;;
* )
echo "Unrecognized system!"
exit 1;