--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java Fri Oct 12 09:22:52 2012 -0700
@@ -89,6 +89,7 @@
public Address getAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
/** Returns the decoded address at the given offset */
public Address getCompOopAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
+ public Address getCompKlassAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
//
// Java-related routines
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -121,6 +121,9 @@
public long getHeapOopSize();
public long getNarrowOopBase();
public int getNarrowOopShift();
+ public long getKlassPtrSize();
+ public long getNarrowKlassBase();
+ public int getNarrowKlassShift();
public ReadResult readBytesFromProcess(long address, long numBytes)
throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java Fri Oct 12 09:22:52 2012 -0700
@@ -58,6 +58,10 @@
protected long heapOopSize;
protected long narrowOopBase; // heap base for compressed oops.
protected int narrowOopShift; // shift to decode compressed oops.
+ // class metadata space
+ protected long klassPtrSize;
+ protected long narrowKlassBase; // heap base for compressed klass ptrs.
+ protected int narrowKlassShift; // shift to decode compressed klass ptrs.
// Should be initialized if desired by calling initCache()
private PageCache cache;
@@ -159,10 +163,14 @@
javaPrimitiveTypesConfigured = true;
}
- public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift) {
+ public void putHeapConst(long heapOopSize, long klassPtrSize, long narrowOopBase, int narrowOopShift,
+ long narrowKlassBase, int narrowKlassShift) {
this.heapOopSize = heapOopSize;
+ this.klassPtrSize = klassPtrSize;
this.narrowOopBase = narrowOopBase;
this.narrowOopShift = narrowOopShift;
+ this.narrowKlassBase = narrowKlassBase;
+ this.narrowKlassShift = narrowKlassShift;
}
/** May be called by subclasses if desired to initialize the page
@@ -464,6 +472,15 @@
return value;
}
+ protected long readCompKlassAddressValue(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCInteger(address, getKlassPtrSize(), true);
+ if (value != 0) {
+ value = (long)(narrowKlassBase + (long)(value << narrowKlassShift));
+ }
+ return value;
+ }
+
protected void writeAddressValue(long address, long value)
throws UnmappedAddressException, UnalignedAddressException {
writeCInteger(address, machDesc.getAddressSize(), value);
@@ -551,4 +568,15 @@
public int getNarrowOopShift() {
return narrowOopShift;
}
+
+ public long getKlassPtrSize() {
+ return klassPtrSize;
+ }
+
+ public long getNarrowKlassBase() {
+ return narrowKlassBase;
+ }
+ public int getNarrowKlassShift() {
+ return narrowKlassShift;
+ }
}
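
For reference, a self-contained sketch (not part of the patch) of the narrow-klass arithmetic that readCompKlassAddressValue above implements, together with its inverse as used by the encode_klass_not_null/decode_klass_not_null assembler routines further down: decode shifts the 32-bit value left by narrowKlassShift and adds narrowKlassBase, encode does the reverse, and zero stays zero (null). The base and shift constants below are invented for illustration; in the agent they arrive via putHeapConst().

public class NarrowKlassCodecSketch {
    // Hypothetical values; the real ones come from Universe's _narrow_klass fields.
    static final long NARROW_KLASS_BASE  = 0x0000000800000000L;
    static final int  NARROW_KLASS_SHIFT = 3;

    // Mirrors DebuggerBase.readCompKlassAddressValue(): null stays null,
    // otherwise shift the narrow value and add the base.
    static long decode(long narrow) {
        return narrow == 0 ? 0 : NARROW_KLASS_BASE + (narrow << NARROW_KLASS_SHIFT);
    }

    // Inverse direction, as the encode_klass_not_null routines emit in generated code.
    static long encode(long klassAddress) {
        return (klassAddress - NARROW_KLASS_BASE) >>> NARROW_KLASS_SHIFT;
    }

    public static void main(String[] args) {
        long narrow = 0x00123456L;
        long full = decode(narrow);
        System.out.println("decoded:    0x" + Long.toHexString(full));         // 0x80091a2b0
        System.out.println("round trip: 0x" + Long.toHexString(encode(full))); // 0x123456
    }
}
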
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -42,5 +42,7 @@
long jintSize,
long jlongSize,
long jshortSize);
- public void putHeapConst(long heapOopSize, long narrowOopBase, int narrowOopShift);
+ public void putHeapConst(long heapOopSize, long klassPtrSize,
+ long narrowOopBase, int narrowOopShift,
+ long narrowKlassBase, int narrowKlassShift);
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -79,6 +79,11 @@
return debugger.readCompOopAddress(addr + offset);
}
+ public Address getCompKlassAddressAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompKlassAddress(addr + offset);
+ }
+
//
// Java-related routines
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -46,6 +46,7 @@
throws DebuggerException;
public BsdAddress readAddress(long address) throws DebuggerException;
public BsdAddress readCompOopAddress(long address) throws DebuggerException;
+ public BsdAddress readCompKlassAddress(long address) throws DebuggerException;
public BsdOopHandle readOopHandle(long address) throws DebuggerException;
public BsdOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int lwp_id) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java Fri Oct 12 09:22:52 2012 -0700
@@ -431,6 +431,12 @@
return (value == 0 ? null : new BsdAddress(this, value));
}
+ public BsdAddress readCompKlassAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompKlassAddressValue(address);
+ return (value == 0 ? null : new BsdAddress(this, value));
+ }
+
/** From the BsdDebugger interface */
public BsdOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException,
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -80,6 +80,10 @@
return new DummyAddress(debugger, badLong);
}
+ public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return new DummyAddress(debugger, badLong);
+ }
+
//
// Java-related routines
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -79,6 +79,11 @@
return debugger.readCompOopAddress(addr + offset);
}
+ public Address getCompKlassAddressAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompKlassAddress(addr + offset);
+ }
+
//
// Java-related routines
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -46,6 +46,7 @@
throws DebuggerException;
public LinuxAddress readAddress(long address) throws DebuggerException;
public LinuxAddress readCompOopAddress(long address) throws DebuggerException;
+ public LinuxAddress readCompKlassAddress(long address) throws DebuggerException;
public LinuxOopHandle readOopHandle(long address) throws DebuggerException;
public LinuxOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int lwp_id) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Fri Oct 12 09:22:52 2012 -0700
@@ -429,6 +429,12 @@
return (value == 0 ? null : new LinuxAddress(this, value));
}
+ public LinuxAddress readCompKlassAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompKlassAddressValue(address);
+ return (value == 0 ? null : new LinuxAddress(this, value));
+ }
+
/** From the LinuxDebugger interface */
public LinuxOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException,
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -76,6 +76,10 @@
return debugger.readCompOopAddress(addr + offset);
}
+ public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompKlassAddress(addr + offset);
+ }
+
//
// Java-related routines
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -47,6 +47,7 @@
throws DebuggerException;
public ProcAddress readAddress(long address) throws DebuggerException;
public ProcAddress readCompOopAddress(long address) throws DebuggerException;
+ public ProcAddress readCompKlassAddress(long address) throws DebuggerException;
public ProcOopHandle readOopHandle(long address) throws DebuggerException;
public ProcOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int tid) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Fri Oct 12 09:22:52 2012 -0700
@@ -351,6 +351,12 @@
return (value == 0 ? null : new ProcAddress(this, value));
}
+ public ProcAddress readCompKlassAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompKlassAddressValue(address);
+ return (value == 0 ? null : new ProcAddress(this, value));
+ }
+
/** From the ProcDebugger interface */
public ProcOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -74,6 +74,9 @@
public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
return debugger.readCompOopAddress(addr + offset);
}
+ public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompKlassAddress(addr + offset);
+ }
//
// Java-related routines
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -68,6 +68,9 @@
public long getHeapOopSize() throws RemoteException;
public long getNarrowOopBase() throws RemoteException;
public int getNarrowOopShift() throws RemoteException;
+ public long getKlassPtrSize() throws RemoteException;
+ public long getNarrowKlassBase() throws RemoteException;
+ public int getNarrowKlassShift() throws RemoteException;
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Fri Oct 12 09:22:52 2012 -0700
@@ -99,7 +99,10 @@
javaPrimitiveTypesConfigured = true;
narrowOopBase = remoteDebugger.getNarrowOopBase();
narrowOopShift = remoteDebugger.getNarrowOopShift();
+ narrowKlassBase = remoteDebugger.getNarrowKlassBase();
+ narrowKlassShift = remoteDebugger.getNarrowKlassShift();
heapOopSize = remoteDebugger.getHeapOopSize();
+ klassPtrSize = remoteDebugger.getKlassPtrSize();
}
catch (RemoteException e) {
throw new DebuggerException(e);
@@ -319,6 +322,12 @@
return (value == 0 ? null : new RemoteAddress(this, value));
}
+ RemoteAddress readCompKlassAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompKlassAddressValue(address);
+ return (value == 0 ? null : new RemoteAddress(this, value));
+ }
+
RemoteOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
long value = readAddressValue(address);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java Fri Oct 12 09:22:52 2012 -0700
@@ -126,6 +126,18 @@
return debugger.getNarrowOopShift();
}
+ public long getKlassPtrSize() throws RemoteException {
+ return debugger.getKlassPtrSize();
+ }
+
+ public long getNarrowKlassBase() throws RemoteException {
+ return debugger.getNarrowKlassBase();
+ }
+
+ public int getNarrowKlassShift() throws RemoteException {
+ return debugger.getNarrowKlassShift();
+ }
+
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException {
ThreadProxy t1 = getThreadProxy(addrOrId1, isAddress1);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java Fri Oct 12 09:22:52 2012 -0700
@@ -76,6 +76,10 @@
return debugger.readCompOopAddress(addr + offset);
}
+ public Address getCompKlassAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompKlassAddress(addr + offset);
+ }
+
//
// Java-related routines
//
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java Fri Oct 12 09:22:52 2012 -0700
@@ -46,6 +46,7 @@
throws DebuggerException;
public WindbgAddress readAddress(long address) throws DebuggerException;
public WindbgAddress readCompOopAddress(long address) throws DebuggerException;
+ public WindbgAddress readCompKlassAddress(long address) throws DebuggerException;
public WindbgOopHandle readOopHandle(long address) throws DebuggerException;
public WindbgOopHandle readCompOopHandle(long address) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Fri Oct 12 09:22:52 2012 -0700
@@ -321,6 +321,11 @@
return (WindbgAddress) newAddress(readCompOopAddressValue(address));
}
+ public WindbgAddress readCompKlassAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ return (WindbgAddress) newAddress(readCompKlassAddressValue(address));
+ }
+
/** From the WindbgDebugger interface */
public WindbgOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java Fri Oct 12 09:22:52 2012 -0700
@@ -53,6 +53,8 @@
private static AddressField narrowOopBaseField;
private static CIntegerField narrowOopShiftField;
+ private static AddressField narrowKlassBaseField;
+ private static CIntegerField narrowKlassShiftField;
static {
VM.registerVMInitializedObserver(new Observer() {
@@ -86,6 +88,8 @@
narrowOopBaseField = type.getAddressField("_narrow_oop._base");
narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
+ narrowKlassBaseField = type.getAddressField("_narrow_klass._base");
+ narrowKlassShiftField = type.getCIntegerField("_narrow_klass._shift");
}
public Universe() {
@@ -111,6 +115,19 @@
return (int)narrowOopShiftField.getValue();
}
+ public static long getNarrowKlassBase() {
+ if (narrowKlassBaseField.getValue() == null) {
+ return 0;
+ } else {
+ return narrowKlassBaseField.getValue().minus(null);
+ }
+ }
+
+ public static int getNarrowKlassShift() {
+ return (int)narrowKlassShiftField.getValue();
+ }
+
+
/** Returns "TRUE" iff "p" points into the allocated area of the heap. */
public boolean isIn(Address p) {
return heap().isIn(p);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java Fri Oct 12 09:22:52 2012 -0700
@@ -59,7 +59,7 @@
if (headerSize != 0) {
return headerSize;
}
- if (VM.getVM().isCompressedHeadersEnabled()) {
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
headerSize = typeSize;
} else {
headerSize = VM.getVM().alignUp(typeSize + VM.getVM().getIntSize(),
@@ -80,7 +80,7 @@
if (lengthOffsetInBytes != 0) {
return lengthOffsetInBytes;
}
- if (VM.getVM().isCompressedHeadersEnabled()) {
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
lengthOffsetInBytes = typeSize - VM.getVM().getIntSize();
} else {
lengthOffsetInBytes = typeSize;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Fri Oct 12 09:22:52 2012 -0700
@@ -53,7 +53,7 @@
// Returns header size in bytes.
public static long getHeaderSize() {
- if (VM.getVM().isCompressedHeadersEnabled()) {
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
return typeSize - VM.getVM().getIntSize();
} else {
return typeSize;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MetadataField.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MetadataField.java Fri Oct 12 09:22:52 2012 -0700
@@ -27,7 +27,6 @@
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.debugger.*;
-// The class for an C int field simply provides access to the value.
public class MetadataField extends Field {
public MetadataField(sun.jvm.hotspot.types.AddressField vmField, long startOffset) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowKlassField.java Fri Oct 12 09:22:52 2012 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import sun.jvm.hotspot.debugger.*;
+
+public class NarrowKlassField extends MetadataField {
+
+ public NarrowKlassField(sun.jvm.hotspot.types.AddressField vmField, long startOffset) {
+ super(vmField, startOffset);
+ }
+
+ public Metadata getValue(Address addr) {
+ return Metadata.instantiateWrapperFor(addr.getCompKlassAddressAt(getOffset()));
+ }
+ public void setValue(Oop obj, long value) throws MutationException {
+ // Fix this: set* missing in Address
+ }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Fri Oct 12 09:22:52 2012 -0700
@@ -47,10 +47,7 @@
Type type = db.lookupType("oopDesc");
mark = new CIntField(type.getCIntegerField("_mark"), 0);
klass = new MetadataField(type.getAddressField("_metadata._klass"), 0);
- if (VM.getVM().isCompressedHeadersEnabled()) {
- // compressedKlass = new CIntField(type.getCIntegerField("_metadata._compressed_klass"), 0);
- throw new InternalError("unimplemented");
- }
+ compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0);
headerSize = type.getSize();
}
@@ -74,13 +71,13 @@
private static CIntField mark;
private static MetadataField klass;
- private static CIntField compressedKlass;
+ private static NarrowKlassField compressedKlass;
// Accessors for declared fields
public Mark getMark() { return new Mark(getHandle()); }
public Klass getKlass() {
- if (VM.getVM().isCompressedHeadersEnabled()) {
- throw new InternalError("unimplemented");
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
+ return (Klass)compressedKlass.getValue(getHandle());
} else {
return (Klass)klass.getValue(getHandle());
}
@@ -150,7 +147,7 @@
void iterateFields(OopVisitor visitor, boolean doVMFields) {
if (doVMFields) {
visitor.doCInt(mark, true);
- if (VM.getVM().isCompressedHeadersEnabled()) {
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
throw new InternalError("unimplemented");
} else {
visitor.doMetadata(klass, true);
@@ -210,8 +207,8 @@
if (handle == null) {
return null;
}
- if (VM.getVM().isCompressedHeadersEnabled()) {
- throw new InternalError("Unimplemented");
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
+ return (Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset()));
} else {
return (Klass)Metadata.instantiateWrapperFor(handle.getAddressAt(klass.getOffset()));
}
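
A sketch (again not part of the patch) of how a Klass address is recovered from an object header once UseCompressedKlassPointers is on, following the getCompKlassAddressAt path used in Oop above: read the 32-bit narrow value at the _metadata._compressed_klass offset, then decode it with the klass base and shift. The offset, base, and shift below are assumptions for illustration, not values taken from this patch.

public class CompressedKlassHeaderSketch {
    static final int  KLASS_OFFSET_IN_BYTES = 8;             // assumed: after the mark word on LP64
    static final long NARROW_KLASS_BASE     = 0x0000000800000000L;
    static final int  NARROW_KLASS_SHIFT    = 3;

    // 'header' stands in for the raw bytes of an oopDesc; little-endian assumed.
    static long klassAddress(java.nio.ByteBuffer header) {
        long narrow = header.getInt(KLASS_OFFSET_IN_BYTES) & 0xFFFFFFFFL;  // 32-bit narrow klass
        return narrow == 0 ? 0 : NARROW_KLASS_BASE + (narrow << NARROW_KLASS_SHIFT);
    }

    public static void main(String[] args) {
        java.nio.ByteBuffer header = java.nio.ByteBuffer.allocate(16)
                .order(java.nio.ByteOrder.LITTLE_ENDIAN);
        header.putInt(KLASS_OFFSET_IN_BYTES, 0x00123456);
        System.out.printf("Klass* = 0x%x%n", klassAddress(header));
    }
}
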
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/java_lang_Class.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/java_lang_Class.java Fri Oct 12 09:22:52 2012 -0700
@@ -63,11 +63,7 @@
/** get Klass* field at offset hc_klass_offset from a java.lang.Class object */
public static Klass asKlass(Oop aClass) {
- if (VM.getVM().isCompressedHeadersEnabled()) {
- throw new InternalError("unimplemented");
- } else {
- return (Klass)Metadata.instantiateWrapperFor(aClass.getHandle().getAddressAt(klassOffset));
- }
+ return (Klass)Metadata.instantiateWrapperFor(aClass.getHandle().getAddressAt(klassOffset));
}
/** get oop_size field at offset oop_size_offset from a java.lang.Class object */
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Oct 12 09:22:52 2012 -0700
@@ -103,6 +103,7 @@
private int logMinObjAlignmentInBytes;
private int heapWordSize;
private int heapOopSize;
+ private int klassPtrSize;
private int oopSize;
/** This is only present in a non-core build */
private CodeCache codeCache;
@@ -129,7 +130,7 @@
private static CIntegerType boolType;
private Boolean sharingEnabled;
private Boolean compressedOopsEnabled;
- private Boolean compressedHeadersEnabled;
+ private Boolean compressedKlassPointersEnabled;
// command line flags supplied to VM - see struct Flag in globals.hpp
public static final class Flag {
@@ -350,6 +351,12 @@
} else {
heapOopSize = (int)getOopSize();
}
+
+ if (isCompressedKlassPointersEnabled()) {
+ klassPtrSize = (int)getIntSize();
+ } else {
+ klassPtrSize = (int)getOopSize(); // same as an oop
+ }
}
/** This could be used by a reflective runtime system */
@@ -374,8 +381,9 @@
((Observer) iter.next()).update(null, null);
}
- debugger.putHeapConst(soleInstance.getHeapOopSize(), Universe.getNarrowOopBase(),
- Universe.getNarrowOopShift());
+ debugger.putHeapConst(soleInstance.getHeapOopSize(), soleInstance.getKlassPtrSize(),
+ Universe.getNarrowOopBase(), Universe.getNarrowOopShift(),
+ Universe.getNarrowKlassBase(), Universe.getNarrowKlassShift());
}
/** This is used by the debugging system */
@@ -536,6 +544,10 @@
public int getHeapOopSize() {
return heapOopSize;
}
+
+ public int getKlassPtrSize() {
+ return klassPtrSize;
+ }
/** Utility routine for getting data structure alignment correct */
public long alignUp(long size, long alignment) {
return (size + alignment - 1) & ~(alignment - 1);
@@ -784,13 +796,13 @@
return compressedOopsEnabled.booleanValue();
}
- public boolean isCompressedHeadersEnabled() {
- if (compressedHeadersEnabled == null) {
- Flag flag = getCommandLineFlag("UseCompressedHeaders");
- compressedHeadersEnabled = (flag == null) ? Boolean.FALSE:
+ public boolean isCompressedKlassPointersEnabled() {
+ if (compressedKlassPointersEnabled == null) {
+ Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+ compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
(flag.getBool()? Boolean.TRUE: Boolean.FALSE);
}
- return compressedHeadersEnabled.booleanValue();
+ return compressedKlassPointersEnabled.booleanValue();
}
public int getObjectAlignmentInBytes() {
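
A minimal sketch (values assumed, not part of the patch) of how the read width for the klass field follows from UseCompressedKlassPointers, mirroring the klassPtrSize selection added to VM.java above: compressed klass pointers are stored and read as 32-bit ints, otherwise as a full-width pointer.

public class KlassPtrSizeSketch {
    // Mirrors the VM.java logic: int-sized when compressed, oop-sized otherwise.
    // The 8-byte oop size is an assumption (typical LP64 without compressed oops).
    static int klassPtrSize(boolean useCompressedKlassPointers, int oopSize) {
        return useCompressedKlassPointers ? 4 : oopSize;
    }

    public static void main(String[] args) {
        System.out.println(klassPtrSize(true, 8));   // prints 4
        System.out.println(klassPtrSize(false, 8));  // prints 8
    }
}
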
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java Fri Oct 12 09:22:52 2012 -0700
@@ -53,9 +53,8 @@
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("oopDesc");
- if (VM.getVM().isCompressedHeadersEnabled()) {
- // klassField = type.getNarrowOopField("_metadata._compressed_klass");
- throw new InternalError("unimplemented");
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
+ klassField = type.getAddressField("_metadata._compressed_klass");
} else {
klassField = type.getAddressField("_metadata._klass");
}
@@ -70,7 +69,11 @@
}
try {
// Try to instantiate the Klass
- Metadata.instantiateWrapperFor(klassField.getValue(oop));
+ if (VM.getVM().isCompressedKlassPointersEnabled()) {
+ Metadata.instantiateWrapperFor(oop.getCompKlassAddressAt(klassField.getOffset()));
+ } else {
+ Metadata.instantiateWrapperFor(klassField.getValue(oop));
+ }
return true;
}
catch (AddressException e) {
--- a/hotspot/make/bsd/makefiles/dtrace.make Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/make/bsd/makefiles/dtrace.make Fri Oct 12 09:22:52 2012 -0700
@@ -114,21 +114,21 @@
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
- $(QUIETLY) DYLD_LIBRARY_PATH=. ./$(GENOFFS) -header > $@.tmp; touch $@; \
+ $(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -header > $@.tmp; touch $@; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; \
else rm -f $@.tmp; \
fi
$(JVMOFFS)Index.h: $(GENOFFS)
- $(QUIETLY) DYLD_LIBRARY_PATH=. ./$(GENOFFS) -index > $@.tmp; touch $@; \
+ $(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -index > $@.tmp; touch $@; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; \
else rm -f $@.tmp; \
fi
$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
- $(QUIETLY) DYLD_LIBRARY_PATH=. ./$(GENOFFS) -table > $@.tmp; touch $@; \
+ $(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -table > $@.tmp; touch $@; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; \
else rm -f $@.tmp; \
--- a/hotspot/make/solaris/makefiles/dtrace.make Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/make/solaris/makefiles/dtrace.make Fri Oct 12 09:22:52 2012 -0700
@@ -206,15 +206,15 @@
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
- $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -header > $@.tmp
+ $(QUIETLY) LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ./$(GENOFFS) -header > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS)Index.h: $(GENOFFS)
- $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -index > $@.tmp
+ $(QUIETLY) LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ./$(GENOFFS) -index > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
- $(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -table > $@.tmp
+ $(QUIETLY) LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ./$(GENOFFS) -table > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1641,6 +1641,21 @@
}
+void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
+ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ narrowOop encoded_k = oopDesc::encode_klass(k);
+
+ assert_not_delayed();
+ // Relocation with special format (see relocInfo_sparc.hpp).
+ relocate(rspec, 1);
+ // Assembler::sethi(encoded_k, d);
+ emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
+ // Don't add relocation for 'add'. Do patching during 'sethi' processing.
+ add(d, low10(encoded_k), d);
+
+}
void MacroAssembler::align(int modulus) {
while (offset() % modulus != 0) nop();
@@ -4660,7 +4675,7 @@
// if this changes, change that.
if (UseCompressedKlassPointers) {
lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
- decode_heap_oop_not_null(klass);
+ decode_klass_not_null(klass);
} else {
ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
}
@@ -4669,7 +4684,7 @@
void MacroAssembler::store_klass(Register klass, Register dst_oop) {
if (UseCompressedKlassPointers) {
assert(dst_oop != klass, "not enough registers");
- encode_heap_oop_not_null(klass);
+ encode_klass_not_null(klass);
st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
} else {
st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -4829,17 +4844,58 @@
// pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sllx(src, LogMinObjAlignmentInBytes, dst);
if (Universe::narrow_oop_base() != NULL)
add(dst, G6_heapbase, dst);
}
+void MacroAssembler::encode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ if (Universe::narrow_klass_base() != NULL)
+ sub(r, G6_heapbase, r);
+ srlx(r, LogKlassAlignmentInBytes, r);
+}
+
+void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ if (Universe::narrow_klass_base() == NULL) {
+ srlx(src, LogKlassAlignmentInBytes, dst);
+ } else {
+ sub(src, G6_heapbase, dst);
+ srlx(dst, LogKlassAlignmentInBytes, dst);
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ sllx(r, LogKlassAlignmentInBytes, r);
+ if (Universe::narrow_klass_base() != NULL)
+ add(r, G6_heapbase, r);
+}
+
+void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ sllx(src, LogKlassAlignmentInBytes, dst);
+ if (Universe::narrow_klass_base() != NULL)
+ add(dst, G6_heapbase, dst);
+}
+
void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops) {
- // call indirectly to solve generation ordering problem
- AddressLiteral base(Universe::narrow_oop_base_addr());
+ if (UseCompressedOops || UseCompressedKlassPointers) {
+ AddressLiteral base(Universe::narrow_ptrs_base_addr());
load_ptr_contents(base, G6_heapbase);
}
}
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -2280,6 +2280,11 @@
void encode_heap_oop_not_null(Register src, Register dst);
void decode_heap_oop_not_null(Register src, Register dst);
+ void encode_klass_not_null(Register r);
+ void decode_klass_not_null(Register r);
+ void encode_klass_not_null(Register src, Register dst);
+ void decode_klass_not_null(Register src, Register dst);
+
// Support for managing the JavaThread pointer (i.e.; the reference to
// thread-local information).
void get_thread(); // load G2_thread
@@ -2409,6 +2414,7 @@
inline void set_metadata (const AddressLiteral& obj_addr, Register d); // same as load_address
void set_narrow_oop( jobject obj, Register d );
+ void set_narrow_klass( Klass* k, Register d );
// nop padding
void align(int modulus);
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -105,6 +105,11 @@
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
}
+ if (UseCompressedKlassPointers) {
+ if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
+ src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
+ }
+
if (dst->is_register()) {
if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
return !PatchALot;
@@ -969,8 +974,18 @@
#endif
}
break;
- case T_METADATA:
- case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
+ case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
+ case T_ADDRESS:
+#ifdef _LP64
+ if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+ __ lduw(base, offset, to_reg->as_register());
+ __ decode_klass_not_null(to_reg->as_register());
+ } else
+#endif
+ {
+ __ ld_ptr(base, offset, to_reg->as_register());
+ }
+ break;
case T_ARRAY : // fall through
case T_OBJECT:
{
@@ -2344,7 +2359,7 @@
if (UseCompressedKlassPointers) {
// tmp holds the default type. It currently comes uncompressed after the
// load of a constant, so encode it.
- __ encode_heap_oop(tmp);
+ __ encode_klass_not_null(tmp);
// load the raw value of the dst klass, since we will be comparing
// uncompressed values directly.
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -189,7 +189,7 @@
if (UseCompressedKlassPointers) {
// Save klass
mov(klass, t1);
- encode_heap_oop_not_null(t1);
+ encode_klass_not_null(t1);
stw(t1, obj, oopDesc::klass_offset_in_bytes());
} else {
st_ptr(klass, obj, oopDesc::klass_offset_in_bytes());
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -208,8 +208,6 @@
Register O1_scratch = O1;
Register O4_param_size = O4; // size of parameters
- address code_start = __ pc();
-
// here's where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
@@ -252,22 +250,9 @@
// O4_first_arg_addr is live!
if (TraceMethodHandles) {
- const char* name = vmIntrinsics::name_at(iid);
- if (*name == '_') name += 1;
- const size_t len = strlen(name) + 50;
- char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
- const char* suffix = "";
- if (vmIntrinsics::method_for(iid) == NULL ||
- !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
- if (is_signature_polymorphic_static(iid))
- suffix = "/static";
- else
- suffix = "/private";
- }
- jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
if (O0_mh != noreg)
__ mov(O0_mh, G3_method_handle); // make stub happy
- trace_method_handle(_masm, qname);
+ trace_method_handle_interpreter_entry(_masm, iid);
}
if (iid == vmIntrinsics::_invokeBasic) {
@@ -287,14 +272,6 @@
generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
}
- if (PrintMethodHandleStubs) {
- address code_end = __ pc();
- tty->print_cr("--------");
- tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
- Disassembler::decode(code_start, code_end);
- tty->cr();
- }
-
return entry_point;
}
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -58,5 +58,3 @@
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry);
-
- static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -97,8 +97,8 @@
jint inst2;
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
if (format() != 0) {
- assert(type() == relocInfo::oop_type, "only narrow oops case");
- jint np = oopDesc::encode_heap_oop((oop)x);
+ assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
+ jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
inst &= ~Assembler::hi22(-1);
inst |= Assembler::hi22((intptr_t)np);
if (verify_only) {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Fri Oct 12 09:22:52 2012 -0700
@@ -557,9 +557,9 @@
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int klass_load_size;
- if (UseCompressedOops && UseCompressedKlassPointers) {
+ if (UseCompressedKlassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
- if (Universe::narrow_oop_base() == NULL)
+ if (Universe::narrow_klass_base() == NULL)
klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
else
klass_load_size = 3*BytesPerInstWord;
@@ -1707,11 +1707,11 @@
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSLL R_G5,3,R_G5");
- if (Universe::narrow_oop_base() != NULL)
+ if (Universe::narrow_klass_base() != NULL)
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
} else {
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
@@ -1942,6 +1942,12 @@
return false;
}
+bool Matcher::narrow_klass_use_complex_address() {
+ NOT_LP64(ShouldNotCallThis());
+ assert(UseCompressedKlassPointers, "only for compressed klass code");
+ return false;
+}
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
@@ -2602,9 +2608,9 @@
int off = __ offset();
__ load_klass(O0, G3_scratch);
int klass_load_size;
- if (UseCompressedOops && UseCompressedKlassPointers) {
+ if (UseCompressedKlassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
- if (Universe::narrow_oop_base() == NULL)
+ if (Universe::narrow_klass_base() == NULL)
klass_load_size = 2*BytesPerInstWord;
else
klass_load_size = 3*BytesPerInstWord;
@@ -3610,6 +3616,15 @@
interface(CONST_INTER);
%}
+operand immNKlass()
+%{
+ match(ConNKlass);
+
+ op_cost(10);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
// NULL Pointer Immediate
operand immN0()
%{
@@ -5870,8 +5885,8 @@
%}
// Load Unsigned Integer into a Long Register
-instruct loadUI2L(iRegL dst, memory mem) %{
- match(Set dst (LoadUI2L mem));
+instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
+ match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
ins_cost(MEMORY_REF_COST);
size(4);
@@ -6159,6 +6174,17 @@
ins_pipe(ialu_hi_lo_reg);
%}
+instruct loadConNKlass(iRegN dst, immNKlass src) %{
+ match(Set dst src);
+ ins_cost(DEFAULT_COST * 3/2);
+ format %{ "SET $src,$dst\t! compressed klass ptr" %}
+ ins_encode %{
+ Register dst = $dst$$Register;
+ __ set_narrow_klass((Klass*)$src$$constant, dst);
+ %}
+ ins_pipe(ialu_hi_lo_reg);
+%}
+
// Materialize long value (predicated by immL_cheap).
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
match(Set dst con);
@@ -6475,6 +6501,25 @@
ins_pipe(istore_mem_spORreg);
%}
+instruct storeNKlass(memory dst, iRegN src) %{
+ match(Set dst (StoreNKlass dst src));
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+
+ format %{ "STW $src,$dst\t! compressed klass ptr" %}
+ ins_encode %{
+ Register base = as_Register($dst$$base);
+ Register index = as_Register($dst$$index);
+ Register src = $src$$Register;
+ if (index != G0) {
+ __ stw(src, base, index);
+ } else {
+ __ stw(src, base, $dst$$disp);
+ }
+ %}
+ ins_pipe(istore_mem_spORreg);
+%}
+
instruct storeN0(memory dst, immN0 src) %{
match(Set dst (StoreN dst src));
ins_cost(MEMORY_REF_COST);
@@ -6582,6 +6627,23 @@
ins_pipe(ialu_reg);
%}
+instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
+ match(Set dst (EncodePKlass src));
+ format %{ "encode_klass_not_null $src, $dst" %}
+ ins_encode %{
+ __ encode_klass_not_null($src$$Register, $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
+ match(Set dst (DecodeNKlass src));
+ format %{ "decode_klass_not_null $src, $dst" %}
+ ins_encode %{
+ __ decode_klass_not_null($src$$Register, $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -117,6 +117,7 @@
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+ Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2
// Indirect branch is the same cost as direct
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -220,13 +220,13 @@
const int basic = 5*BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedKlassPointers ?
- ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+ ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
return basic + slop;
} else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedKlassPointers ?
- ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+ ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
return (basic + slop);
}
}
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -6916,7 +6916,7 @@
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// r12 is the heapbase.
- LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base");)
+ LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@@ -10016,7 +10016,7 @@
#ifdef _LP64
if (UseCompressedKlassPointers) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- decode_heap_oop_not_null(dst);
+ decode_klass_not_null(dst);
} else
#endif
movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
@@ -10027,15 +10027,10 @@
if (UseCompressedKlassPointers) {
assert (Universe::heap() != NULL, "java heap should be initialized");
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- if (Universe::narrow_oop_shift() != 0) {
- assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- if (LogMinObjAlignmentInBytes == Address::times_8) {
- movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
- } else {
- // OK to use shift since we don't need to preserve flags.
- shlq(dst, LogMinObjAlignmentInBytes);
- movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset()));
- }
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+ movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
} else {
movq(dst, Address(dst, Klass::prototype_header_offset()));
}
@@ -10050,7 +10045,7 @@
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
- encode_heap_oop_not_null(src);
+ encode_klass_not_null(src);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else
#endif
@@ -10132,12 +10127,12 @@
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
- assert (UseCompressedOops, "should be compressed");
+ assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
assert (Universe::heap() != NULL, "java heap should be initialized");
if (CheckCompressedOops) {
Label ok;
push(rscratch1); // cmpptr trashes rscratch1
- cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
+ cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
jcc(Assembler::equal, ok);
STOP(msg);
bind(ok);
@@ -10275,6 +10270,74 @@
}
}
+void MacroAssembler::encode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
+#endif
+ if (Universe::narrow_klass_base() != NULL) {
+ subq(r, r12_heapbase);
+ }
+ if (Universe::narrow_klass_shift() != 0) {
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shrq(r, LogKlassAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
+#endif
+ if (dst != src) {
+ movq(dst, src);
+ }
+ if (Universe::narrow_klass_base() != NULL) {
+ subq(dst, r12_heapbase);
+ }
+ if (Universe::narrow_klass_shift() != 0) {
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shrq(dst, LogKlassAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Note: it will change flags
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shlq(r, LogKlassAlignmentInBytes);
+ if (Universe::narrow_klass_base() != NULL) {
+ addq(r, r12_heapbase);
+ }
+ } else {
+ assert (Universe::narrow_klass_base() == NULL, "sanity");
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Note: it will change flags
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+ leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+ } else {
+ assert (Universe::narrow_klass_base() == NULL, "sanity");
+ if (dst != src) {
+ movq(dst, src);
+ }
+ }
+}
+
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
@@ -10293,6 +10356,22 @@
mov_narrow_oop(dst, oop_index, rspec);
}
+void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
@@ -10311,9 +10390,25 @@
Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}
+void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops) {
- movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
+ if (UseCompressedOops || UseCompressedKlassPointers) {
+ movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
}
}
#endif // _LP64
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -2083,6 +2083,15 @@
void cmp_narrow_oop(Register dst, jobject obj);
void cmp_narrow_oop(Address dst, jobject obj);
+ void encode_klass_not_null(Register r);
+ void decode_klass_not_null(Register r);
+ void encode_klass_not_null(Register dst, Register src);
+ void decode_klass_not_null(Register dst, Register src);
+ void set_narrow_klass(Register dst, Klass* k);
+ void set_narrow_klass(Address dst, Klass* k);
+ void cmp_narrow_klass(Register dst, Klass* k);
+ void cmp_narrow_klass(Address dst, Klass* k);
+
// if heap base register is used - reinit it with the correct value
void reinit_heapbase();
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -148,7 +148,7 @@
static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free r12) in case of compressed oops
- if (UseCompressedOops) return range - 1;
+ if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
return range;
}
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -340,7 +340,7 @@
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- const bool do_post_padding = VerifyOops || UseCompressedOops;
+ const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -1262,7 +1262,11 @@
break;
case T_ADDRESS:
- __ movptr(dest->as_register(), from_addr);
+ if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+ __ movl(dest->as_register(), from_addr);
+ } else {
+ __ movptr(dest->as_register(), from_addr);
+ }
break;
case T_INT:
__ movl(dest->as_register(), from_addr);
@@ -1364,6 +1368,12 @@
}
#endif
__ verify_oop(dest->as_register());
+ } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+#ifdef _LP64
+ if (UseCompressedKlassPointers) {
+ __ decode_klass_not_null(dest->as_register());
+ }
+#endif
}
}
@@ -1705,7 +1715,7 @@
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
- if (k->is_loaded() && !UseCompressedOops) {
+ if (k->is_loaded() && !UseCompressedKlassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
@@ -3446,7 +3456,7 @@
__ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
if (UseCompressedKlassPointers) {
- __ encode_heap_oop(tmp);
+ __ encode_klass_not_null(tmp);
}
#endif
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1166,7 +1166,7 @@
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
- if (!x->klass()->is_loaded() || UseCompressedOops) {
+ if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
tmp3 = new_register(objectType);
}
__ checkcast(reg, obj.result(), x->klass(),
@@ -1188,7 +1188,7 @@
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
- if (!x->klass()->is_loaded() || UseCompressedOops) {
+ if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -159,7 +159,7 @@
#ifdef _LP64
if (UseCompressedKlassPointers) { // Take care not to kill klass
movptr(t1, klass);
- encode_heap_oop_not_null(t1);
+ encode_klass_not_null(t1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else
#endif
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -209,8 +209,6 @@
Register rcx_mh = rcx; // MH receiver; dies quickly and is recycled
Register rbx_method = rbx; // eventual target of this invocation
- address code_start = __ pc();
-
// here's where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
@@ -251,23 +249,7 @@
// rdx_first_arg_addr is live!
- if (TraceMethodHandles) {
- const char* name = vmIntrinsics::name_at(iid);
- if (*name == '_') name += 1;
- const size_t len = strlen(name) + 50;
- char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
- const char* suffix = "";
- if (vmIntrinsics::method_for(iid) == NULL ||
- !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
- if (is_signature_polymorphic_static(iid))
- suffix = "/static";
- else
- suffix = "/private";
- }
- jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
- // note: stub look for mh in rcx
- trace_method_handle(_masm, qname);
- }
+ trace_method_handle_interpreter_entry(_masm, iid);
if (iid == vmIntrinsics::_invokeBasic) {
generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);
@@ -287,14 +269,6 @@
generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
}
- if (PrintMethodHandleStubs) {
- address code_end = __ pc();
- tty->print_cr("--------");
- tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
- Disassembler::decode(code_start, code_end);
- tty->cr();
- }
-
return entry_point;
}
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -55,8 +55,6 @@
Register temp2,
bool for_compiler_entry);
- static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
-
static Register saved_last_sp_register() {
// Should be in sharedRuntime, not here.
return LP64_ONLY(r13) NOT_LP64(rsi);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -212,11 +212,11 @@
if (is_vtable_stub) {
// Vtable stub size
return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
- (UseCompressedOops ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
+ (UseCompressedKlassPointers ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
} else {
// Itable stub size
return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
- (UseCompressedOops ? 32 : 0); // 2 leaqs
+ (UseCompressedKlassPointers ? 32 : 0); // 2 leaqs
}
// In order to tune these parameters, run the JVM with VM options
// +PrintMiscellaneous and +WizardMode to see information about
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad Fri Oct 12 09:22:52 2012 -0700
@@ -1424,6 +1424,11 @@
return true;
}
+bool Matcher::narrow_klass_use_complex_address() {
+ ShouldNotCallThis();
+ return true;
+}
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
@@ -1553,9 +1558,6 @@
// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();
- if (opc == Op_LoadUI2L) {
- return true;
- }
if (opc == Op_AndL) {
Node* o2 = n->in(2);
if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
@@ -6147,8 +6149,8 @@
%}
// Load Unsigned Integer into Long Register
-instruct loadUI2L(eRegL dst, memory mem, eFlagsReg cr) %{
- match(Set dst (LoadUI2L mem));
+instruct loadUI2L(eRegL dst, memory mem, immL_32bits mask, eFlagsReg cr) %{
+ match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
effect(KILL cr);
ins_cost(250);
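Editor's note: the loadUI2L rewrite above (x86_64.ad gets the matching change further down) removes the dedicated LoadUI2L ideal node; the matcher now recognizes the equivalent AndL(ConvI2L(LoadI mem)) shape against an all-ones 32-bit mask (immL_32bits). A quick self-contained check of the identity the new rule relies on (zero-extending a 32-bit value equals sign-extending it and masking the low 32 bits); nothing below is adlc or HotSpot code:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t samples[] = { 0, 1, -1, 123456789, INT32_MIN, INT32_MAX };
      for (int32_t v : samples) {
        uint64_t zero_extended = static_cast<uint32_t>(v);  // old LoadUI2L semantics
        uint64_t masked        = static_cast<uint64_t>(static_cast<int64_t>(v)) & 0xFFFFFFFFULL;
        // masked models AndL(ConvI2L(LoadI mem), immL_32bits)
        assert(zero_extended == masked);
      }
      return 0;
    }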
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Fri Oct 12 09:22:52 2012 -0700
@@ -1409,10 +1409,10 @@
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- if (Universe::narrow_oop_shift() != 0) {
- st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
+ if (Universe::narrow_klass_shift() != 0) {
+ st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
}
st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
} else {
@@ -1428,7 +1428,7 @@
{
MacroAssembler masm(&cbuf);
uint insts_size = cbuf.insts_size();
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1);
} else {
@@ -1576,6 +1576,11 @@
return (LogMinObjAlignmentInBytes <= 3);
}
+bool Matcher::narrow_klass_use_complex_address() {
+ assert(UseCompressedKlassPointers, "only for compressed klass code");
+ return (LogKlassAlignmentInBytes <= 3);
+}
+
// Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize
@@ -3139,6 +3144,14 @@
interface(CONST_INTER);
%}
+operand immNKlass() %{
+ match(ConNKlass);
+
+ op_cost(10);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
// NULL Pointer Immediate
operand immN0() %{
predicate(n->get_narrowcon() == 0);
@@ -4038,6 +4051,145 @@
%}
%}
+operand indirectNarrowKlass(rRegN reg)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(DecodeNKlass reg);
+
+ format %{ "[$reg]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x4);
+ scale(0x0);
+ disp(0x0);
+ %}
+%}
+
+operand indOffset8NarrowKlass(rRegN reg, immL8 off)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeNKlass reg) off);
+
+ format %{ "[$reg + $off (8-bit)]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x4);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
+operand indOffset32NarrowKlass(rRegN reg, immL32 off)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeNKlass reg) off);
+
+ format %{ "[$reg + $off (32-bit)]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x4);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
+operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (AddP (DecodeNKlass reg) lreg) off);
+
+ op_cost(10);
+ format %{"[$reg + $off + $lreg]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($lreg);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
+operand indIndexNarrowKlass(rRegN reg, rRegL lreg)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeNKlass reg) lreg);
+
+ op_cost(10);
+ format %{"[$reg + $lreg]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($lreg);
+ scale(0x0);
+ disp(0x0);
+ %}
+%}
+
+operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeNKlass reg) (LShiftL lreg scale));
+
+ op_cost(10);
+ format %{"[$reg + $lreg << $scale]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($lreg);
+ scale($scale);
+ disp(0x0);
+ %}
+%}
+
+operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
+%{
+ predicate(Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off);
+
+ op_cost(10);
+ format %{"[$reg + $off + $lreg << $scale]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($lreg);
+ scale($scale);
+ disp($off);
+ %}
+%}
+
+operand indCompressedKlassOffset(rRegN reg, immL32 off) %{
+ predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8));
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeNKlass reg) off);
+
+ op_cost(10);
+ format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %}
+ interface(MEMORY_INTER) %{
+ base(0xc); // R12
+ index($reg);
+ scale(0x3);
+ disp($off);
+ %}
+%}
+
+operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale)
+%{
+ constraint(ALLOC_IN_RC(ptr_reg));
+ predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
+ match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off);
+
+ op_cost(10);
+ format %{"[$reg + $off + $idx << $scale]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($idx);
+ scale($scale);
+ disp($off);
+ %}
+%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
@@ -4209,7 +4361,11 @@
indCompressedOopOffset,
indirectNarrow, indOffset8Narrow, indOffset32Narrow,
indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
- indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
+ indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow,
+ indCompressedKlassOffset,
+ indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass,
+ indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass,
+ indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.
@@ -5044,9 +5200,9 @@
%}
// Load Unsigned Integer into Long Register
-instruct loadUI2L(rRegL dst, memory mem)
-%{
- match(Set dst (LoadUI2L mem));
+instruct loadUI2L(rRegL dst, memory mem, immL_32bits mask)
+%{
+ match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
ins_cost(125);
format %{ "movl $dst, $mem\t# uint -> long" %}
@@ -5469,6 +5625,22 @@
ins_pipe(ialu_reg_fat); // XXX
%}
+instruct loadConNKlass(rRegN dst, immNKlass src) %{
+ match(Set dst src);
+
+ ins_cost(125);
+ format %{ "movl $dst, $src\t# compressed klass ptr" %}
+ ins_encode %{
+ address con = (address)$src$$constant;
+ if (con == NULL) {
+ ShouldNotReachHere();
+ } else {
+ __ set_narrow_klass($dst$$Register, (Klass*)$src$$constant);
+ }
+ %}
+ ins_pipe(ialu_reg_fat); // XXX
+%}
+
instruct loadConF0(regF dst, immF0 src)
%{
match(Set dst src);
@@ -5738,7 +5910,7 @@
instruct storeImmP0(memory mem, immP0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreP mem zero));
ins_cost(125); // XXX
@@ -5774,9 +5946,21 @@
ins_pipe(ialu_mem_reg);
%}
+instruct storeNKlass(memory mem, rRegN src)
+%{
+ match(Set mem (StoreNKlass mem src));
+
+ ins_cost(125); // XXX
+ format %{ "movl $mem, $src\t# compressed klass ptr" %}
+ ins_encode %{
+ __ movl($mem$$Address, $src$$Register);
+ %}
+ ins_pipe(ialu_mem_reg);
+%}
+
instruct storeImmN0(memory mem, immN0 zero)
%{
- predicate(Universe::narrow_oop_base() == NULL);
+ predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
match(Set mem (StoreN mem zero));
ins_cost(125); // XXX
@@ -5804,10 +5988,22 @@
ins_pipe(ialu_mem_imm);
%}
+instruct storeImmNKlass(memory mem, immNKlass src)
+%{
+ match(Set mem (StoreNKlass mem src));
+
+ ins_cost(150); // XXX
+ format %{ "movl $mem, $src\t# compressed klass ptr" %}
+ ins_encode %{
+ __ set_narrow_klass($mem$$Address, (Klass*)$src$$constant);
+ %}
+ ins_pipe(ialu_mem_imm);
+%}
+
// Store Integer Immediate
instruct storeImmI0(memory mem, immI0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreI mem zero));
ins_cost(125); // XXX
@@ -5832,7 +6028,7 @@
// Store Long Immediate
instruct storeImmL0(memory mem, immL0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreL mem zero));
ins_cost(125); // XXX
@@ -5857,7 +6053,7 @@
// Store Short/Char Immediate
instruct storeImmC0(memory mem, immI0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreC mem zero));
ins_cost(125); // XXX
@@ -5883,7 +6079,7 @@
// Store Byte Immediate
instruct storeImmB0(memory mem, immI0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreB mem zero));
ins_cost(125); // XXX
@@ -5908,7 +6104,7 @@
// Store CMS card-mark Immediate
instruct storeImmCM0_reg(memory mem, immI0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreCM mem zero));
ins_cost(125); // XXX
@@ -5946,7 +6142,7 @@
// Store immediate Float value (it is faster than store from XMM register)
instruct storeF0(memory mem, immF0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreF mem zero));
ins_cost(25); // XXX
@@ -5996,7 +6192,7 @@
instruct storeD0(memory mem, immD0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set mem (StoreD mem zero));
ins_cost(25); // XXX
@@ -6482,6 +6678,32 @@
ins_pipe(ialu_reg_long);
%}
+instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
+ match(Set dst (EncodePKlass src));
+ effect(KILL cr);
+ format %{ "encode_heap_oop_not_null $dst,$src" %}
+ ins_encode %{
+ __ encode_klass_not_null($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg_long);
+%}
+
+instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
+ match(Set dst (DecodeNKlass src));
+ effect(KILL cr);
+ format %{ "decode_heap_oop_not_null $dst,$src" %}
+ ins_encode %{
+ Register s = $src$$Register;
+ Register d = $dst$$Register;
+ if (s != d) {
+ __ decode_klass_not_null(d, s);
+ } else {
+ __ decode_klass_not_null(d);
+ }
+ %}
+ ins_pipe(ialu_reg_long);
+%}
+
//----------Conditional Move---------------------------------------------------
// Jump
@@ -10452,7 +10674,7 @@
instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{
- predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
+ predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
match(Set cr (CmpP (LoadP mem) zero));
format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
@@ -10503,6 +10725,27 @@
ins_pipe(ialu_cr_reg_mem);
%}
+instruct compN_rReg_imm_klass(rFlagsRegU cr, rRegN op1, immNKlass op2) %{
+ match(Set cr (CmpN op1 op2));
+
+ format %{ "cmpl $op1, $op2\t# compressed klass ptr" %}
+ ins_encode %{
+ __ cmp_narrow_klass($op1$$Register, (Klass*)$op2$$constant);
+ %}
+ ins_pipe(ialu_cr_reg_imm);
+%}
+
+instruct compN_mem_imm_klass(rFlagsRegU cr, memory mem, immNKlass src)
+%{
+ match(Set cr (CmpN src (LoadNKlass mem)));
+
+ format %{ "cmpl $mem, $src\t# compressed klass ptr" %}
+ ins_encode %{
+ __ cmp_narrow_klass($mem$$Address, (Klass*)$src$$constant);
+ %}
+ ins_pipe(ialu_cr_reg_mem);
+%}
+
instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
match(Set cr (CmpN src zero));
@@ -10526,7 +10769,7 @@
instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
%{
- predicate(Universe::narrow_oop_base() == NULL);
+ predicate(Universe::narrow_oop_base() == NULL && (Universe::narrow_klass_base() == NULL));
match(Set cr (CmpN (LoadN mem) zero));
format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
--- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -267,8 +267,8 @@
printf("\n");
- GEN_OFFS(NarrowOopStruct, _base);
- GEN_OFFS(NarrowOopStruct, _shift);
+ GEN_OFFS(NarrowPtrStruct, _base);
+ GEN_OFFS(NarrowPtrStruct, _shift);
printf("\n");
GEN_VALUE(SIZE_HeapBlockHeader, (int) sizeof(HeapBlock::Header));
--- a/hotspot/src/os/bsd/dtrace/jhelper.d Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/os/bsd/dtrace/jhelper.d Fri Oct 12 09:22:52 2012 -0700
@@ -45,10 +45,6 @@
extern pointer __1cJCodeCacheF_heap_;
extern pointer __1cIUniverseO_collectedHeap_;
-extern pointer __1cIUniverseL_narrow_oop_;
-#ifdef _LP64
-extern pointer UseCompressedOops;
-#endif
extern pointer __1cHnmethodG__vtbl_;
extern pointer __1cNMethodG__vtbl_;
@@ -136,8 +132,8 @@
copyin_offset(SIZE_oopDesc);
copyin_offset(SIZE_ConstantPool);
- copyin_offset(OFFSET_NarrowOopStruct_base);
- copyin_offset(OFFSET_NarrowOopStruct_shift);
+ copyin_offset(OFFSET_NarrowPtrStruct_base);
+ copyin_offset(OFFSET_NarrowPtrStruct_shift);
/*
* The PC to translate is in arg0.
@@ -159,17 +155,6 @@
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
/* Reading volatile values */
-#ifdef _LP64
- this->Use_Compressed_Oops = copyin_uint8(&``UseCompressedOops);
-#else
- this->Use_Compressed_Oops = 0;
-#endif
-
- this->Universe_narrow_oop_base = copyin_ptr(&``__1cIUniverseL_narrow_oop_ +
- OFFSET_NarrowOopStruct_base);
- this->Universe_narrow_oop_shift = copyin_int32(&``__1cIUniverseL_narrow_oop_ +
- OFFSET_NarrowOopStruct_shift);
-
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
--- a/hotspot/src/os/linux/vm/vmError_linux.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/os/linux/vm/vmError_linux.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -44,7 +44,7 @@
jio_snprintf(p, buflen - len,
"\n\n"
"Do you want to debug the problem?\n\n"
- "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT " (" INTPTR_FORMAT ")\n"
+ "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"
"Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
"Otherwise, press RETURN to abort...",
os::current_process_id(), os::current_process_id(),
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -262,8 +262,8 @@
printf("\n");
- GEN_OFFS(NarrowOopStruct, _base);
- GEN_OFFS(NarrowOopStruct, _shift);
+ GEN_OFFS(NarrowPtrStruct, _base);
+ GEN_OFFS(NarrowPtrStruct, _shift);
printf("\n");
GEN_VALUE(SIZE_HeapBlockHeader, sizeof(HeapBlock::Header));
--- a/hotspot/src/os/solaris/dtrace/jhelper.d Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d Fri Oct 12 09:22:52 2012 -0700
@@ -45,10 +45,6 @@
extern pointer __1cJCodeCacheF_heap_;
extern pointer __1cIUniverseO_collectedHeap_;
-extern pointer __1cIUniverseL_narrow_oop_;
-#ifdef _LP64
-extern pointer UseCompressedOops;
-#endif
extern pointer __1cHnmethodG__vtbl_;
extern pointer __1cGMethodG__vtbl_;
@@ -136,8 +132,8 @@
copyin_offset(SIZE_oopDesc);
copyin_offset(SIZE_ConstantPool);
- copyin_offset(OFFSET_NarrowOopStruct_base);
- copyin_offset(OFFSET_NarrowOopStruct_shift);
+ copyin_offset(OFFSET_NarrowPtrStruct_base);
+ copyin_offset(OFFSET_NarrowPtrStruct_shift);
/*
* The PC to translate is in arg0.
@@ -158,18 +154,6 @@
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
- /* Reading volatile values */
-#ifdef _LP64
- this->Use_Compressed_Oops = copyin_uint8(&``UseCompressedOops);
-#else
- this->Use_Compressed_Oops = 0;
-#endif
-
- this->Universe_narrow_oop_base = copyin_ptr(&``__1cIUniverseL_narrow_oop_ +
- OFFSET_NarrowOopStruct_base);
- this->Universe_narrow_oop_shift = copyin_int32(&``__1cIUniverseL_narrow_oop_ +
- OFFSET_NarrowOopStruct_shift);
-
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
--- a/hotspot/src/share/vm/adlc/adlparse.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/adlparse.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1392,7 +1392,7 @@
_AD.addForm(machnode);
}
else if (!strcmp(ident, "attributes")) {
- bool vsi_seen = false, bhds_seen = false;
+ bool vsi_seen = false;
skipws();
if ( (_curchar != '%')
@@ -1436,7 +1436,6 @@
}
pipeline->_branchHasDelaySlot = true;
- bhds_seen = true;
continue;
}
@@ -1639,6 +1638,12 @@
next_char(); // Skip "(" or ","
ident = get_ident(); // Grab next identifier
+ if (_AD._adl_debug > 1) {
+ if (ident != NULL) {
+ fprintf(stderr, "resource_parse: identifier: %s\n", ident);
+ }
+ }
+
if (ident == NULL) {
parse_err(SYNERR, "keyword identifier expected at \"%c\"\n", _curchar);
return;
@@ -2427,7 +2432,6 @@
int lparen = 0; // keep track of parenthesis nesting depth
int rparen = 0; // position of instruction at this depth
InstructForm *inst_seen = NULL;
- InstructForm *child_seen = NULL;
// Walk the match tree,
// Record <parent, position, instruction name, input position>
@@ -2437,7 +2441,7 @@
if (_curchar == '(') {
++lparen;
next_char();
- child_seen = peep_match_child_parse(match, parent, position, rparen);
+ ( void ) peep_match_child_parse(match, parent, position, rparen);
}
// Right paren signals end of an input, may be more
else if (_curchar == ')') {
@@ -3154,6 +3158,9 @@
//------------------------------size_parse-----------------------------------
+// Parse a 'size(<expr>)' attribute which specifies the size of the
+// emitted instructions in bytes. <expr> can be a C++ expression,
+// e.g. a constant.
char* ADLParser::size_parse(InstructForm *instr) {
char* sizeOfInstr = NULL;
@@ -4274,7 +4281,17 @@
|| ((c >= '0') && (c <= '9'))
|| ((c == '_')) || ((c == ':')) || ((c == '#')) );
if (start == end) { // We popped out on the first try
- parse_err(SYNERR, "identifier expected at %c\n", c);
+ // It can occur that `start' contains the rest of the input file.
+ // In this case the output should be truncated.
+ if (strlen(start) > 24) {
+ char buf[32];
+ strncpy(buf, start, 20);
+ buf[20] = '\0';
+ strcat(buf, "[...]");
+ parse_err(SYNERR, "Identifier expected, but found '%s'.", buf);
+ } else {
+ parse_err(SYNERR, "Identifier expected, but found '%s'.", start);
+ }
start = NULL;
}
else {
--- a/hotspot/src/share/vm/adlc/archDesc.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -221,6 +221,7 @@
_register = NULL;
_encode = NULL;
_pipeline = NULL;
+ _frame = NULL;
}
ArchDesc::~ArchDesc() {
@@ -648,7 +649,10 @@
// Return the textual binding for a given CPP flag name.
// Return NULL if there is no binding, or it has been #undef-ed.
char* ArchDesc::get_preproc_def(const char* flag) {
- SourceForm* deff = (SourceForm*) _preproc_table[flag];
+ // In case of syntax errors, flag may take the value NULL.
+ SourceForm* deff = NULL;
+ if (flag != NULL)
+ deff = (SourceForm*) _preproc_table[flag];
return (deff == NULL) ? NULL : deff->_code;
}
@@ -803,7 +807,9 @@
while (i++ <= 15) fputc(' ', errfile);
fprintf(errfile, "%-8s:", pref);
vfprintf(errfile, fmt, args);
- fprintf(errfile, "\n"); }
+ fprintf(errfile, "\n");
+ fflush(errfile);
+ }
return 1;
}
@@ -855,8 +861,14 @@
// Check constraints on result's register class
const char *result_class = opForm.constrained_reg_class();
- if (!result_class) opForm.dump();
- assert( result_class, "Resulting register class was not defined for operand");
+ if (result_class == NULL) {
+ opForm.dump();
+ syntax_err(opForm._linenum,
+ "Use of an undefined result class for operand: %s",
+ opForm._ident);
+ abort();
+ }
+
regMask = reg_class_to_reg_mask( result_class );
return regMask;
@@ -865,8 +877,14 @@
// Obtain the name of the RegMask for an InstructForm
const char *ArchDesc::reg_mask(InstructForm &inForm) {
const char *result = inForm.reduce_result();
- assert( result,
- "Did not find result operand or RegMask for this instruction");
+
+ if (result == NULL) {
+ syntax_err(inForm._linenum,
+ "Did not find result operand or RegMask"
+ " for this instruction: %s",
+ inForm._ident);
+ abort();
+ }
// Instructions producing 'Universe' use RegMask::Empty
if( strcmp(result,"Universe")==0 ) {
@@ -875,10 +893,17 @@
// Lookup this result operand and get its register class
Form *form = (Form*)_globalNames[result];
- assert( form, "Result operand must be defined");
+ if (form == NULL) {
+ syntax_err(inForm._linenum,
+ "Did not find result operand for result: %s", result);
+ abort();
+ }
OperandForm *oper = form->is_operand();
- if (oper == NULL) form->dump();
- assert( oper, "Result must be an OperandForm");
+ if (oper == NULL) {
+ syntax_err(inForm._linenum, "Form is not an OperandForm:");
+ form->dump();
+ abort();
+ }
return reg_mask( *oper );
}
@@ -887,7 +912,13 @@
char *ArchDesc::stack_or_reg_mask(OperandForm &opForm) {
// name of cisc_spillable version
const char *reg_mask_name = reg_mask(opForm);
- assert( reg_mask_name != NULL, "called with incorrect opForm");
+
+ if (reg_mask_name == NULL) {
+ syntax_err(opForm._linenum,
+ "Did not find reg_mask for opForm: %s",
+ opForm._ident);
+ abort();
+ }
const char *stack_or = "STACK_OR_";
int length = (int)strlen(stack_or) + (int)strlen(reg_mask_name) + 1;
@@ -968,7 +999,8 @@
// Create InstructForm and assign type for each ideal instruction.
for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
char *ident = (char *)NodeClassNames[j];
- if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") ||
+ if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
+ !strcmp(ident, "ConN") || !strcmp(ident, "ConNKlass") ||
!strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
!strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
!strcmp(ident, "Bool") ) {
--- a/hotspot/src/share/vm/adlc/archDesc.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/archDesc.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -365,13 +365,14 @@
// A derived class defines the appropriate output for a specific mapping.
class OutputMap {
protected:
- FILE *_hpp;
- FILE *_cpp;
- FormDict &_globals;
- ArchDesc &_AD;
+ FILE *_hpp;
+ FILE *_cpp;
+ FormDict &_globals;
+ ArchDesc &_AD;
+ const char *_name;
public:
- OutputMap (FILE *decl_file, FILE *def_file, FormDict &globals, ArchDesc &AD)
- : _hpp(decl_file), _cpp(def_file), _globals(globals), _AD(AD) {};
+ OutputMap (FILE *decl_file, FILE *def_file, FormDict &globals, ArchDesc &AD, const char *name)
+ : _hpp(decl_file), _cpp(def_file), _globals(globals), _AD(AD), _name(name) {};
// Access files used by this routine
FILE *decl_file() { return _hpp; }
FILE *def_file() { return _cpp; }
--- a/hotspot/src/share/vm/adlc/dict2.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/dict2.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -33,7 +33,7 @@
// String hash tables
#define MAXID 20
static char initflag = 0; // True after 1st initialization
-static char shft[MAXID] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6};
+static char shft[MAXID + 1] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6,7};
static short xsum[MAXID];
//------------------------------bucket---------------------------------------
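Editor's note: the shft table above gains one element and one slot, presumably so that an index equal to MAXID stays inside the array. A tiny sketch with an illustrative static_assert (not present in dict2.cpp) that ties the array length to the highest index used:

    #include <cstddef>

    #define MAXID 20
    static char shft[MAXID + 1] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6,7};
    static_assert(sizeof(shft) / sizeof(shft[0]) == MAXID + 1,
                  "shift table must cover indexes 0 through MAXID");

    int main() { return shft[MAXID] == 7 ? 0 : 1; }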
--- a/hotspot/src/share/vm/adlc/filebuff.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/filebuff.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -31,10 +31,14 @@
using namespace std;
// STRUCTURE FOR HANDLING INPUT AND OUTPUT FILES
-typedef struct {
+
+class BufferedFile {
+ public:
const char *_name;
FILE *_fp;
-} BufferedFile;
+ inline BufferedFile() { _name = NULL; _fp = NULL; };
+ inline ~BufferedFile() {};
+};
class ArchDesc;
--- a/hotspot/src/share/vm/adlc/forms.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/forms.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -215,6 +215,7 @@
if (strcmp(name,"ConI")==0) return Form::idealI;
if (strcmp(name,"ConP")==0) return Form::idealP;
if (strcmp(name,"ConN")==0) return Form::idealN;
+ if (strcmp(name,"ConNKlass")==0) return Form::idealNKlass;
if (strcmp(name,"ConL")==0) return Form::idealL;
if (strcmp(name,"ConF")==0) return Form::idealF;
if (strcmp(name,"ConD")==0) return Form::idealD;
@@ -255,9 +256,8 @@
if( strcmp(opType,"LoadD_unaligned")==0 ) return Form::idealD;
if( strcmp(opType,"LoadF")==0 ) return Form::idealF;
if( strcmp(opType,"LoadI")==0 ) return Form::idealI;
- if( strcmp(opType,"LoadUI2L")==0 ) return Form::idealI;
if( strcmp(opType,"LoadKlass")==0 ) return Form::idealP;
- if( strcmp(opType,"LoadNKlass")==0 ) return Form::idealN;
+ if( strcmp(opType,"LoadNKlass")==0 ) return Form::idealNKlass;
if( strcmp(opType,"LoadL")==0 ) return Form::idealL;
if( strcmp(opType,"LoadL_unaligned")==0 ) return Form::idealL;
if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
@@ -280,6 +280,7 @@
if( strcmp(opType,"StoreL")==0) return Form::idealL;
if( strcmp(opType,"StoreP")==0) return Form::idealP;
if( strcmp(opType,"StoreN")==0) return Form::idealN;
+ if( strcmp(opType,"StoreNKlass")==0) return Form::idealNKlass;
if( strcmp(opType,"StoreVector")==0 ) return Form::idealV;
assert( strcmp(opType,"Store") != 0, "Must type Stores" );
return Form::none;
--- a/hotspot/src/share/vm/adlc/forms.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/forms.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -173,7 +173,8 @@
idealC = 7, // Char type
idealS = 8, // String type
idealN = 9, // Narrow oop types
- idealV = 10 // Vector type
+ idealNKlass = 10, // Narrow klass types
+ idealV = 11 // Vector type
};
// Convert ideal name to a DataType, return DataType::none if not a 'ConX'
Form::DataType ideal_to_const_type(const char *ideal_type_name) const;
@@ -448,11 +449,11 @@
// Return number of USEs + number of DEFs
int num_operands();
// Return zero-based position in list; -1 if not in list.
- int operand_position(const char *name, int usedef);
+ int operand_position(const char *name, int usedef, Form *fm);
// Find position for this name, regardless of use/def information
int operand_position(const char *name);
// Find position for this name when looked up for output via "format"
- int operand_position_format(const char *name);
+ int operand_position_format(const char *name, Form *fm);
// Find position for the Label when looked up for output via "format"
int label_position();
// Find position for the Method when looked up for output via "format"
--- a/hotspot/src/share/vm/adlc/formssel.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -567,7 +567,7 @@
if( strcmp(rc_name,"stack_slots") ) {
// Check for ideal_type of RegFlags
const char *type = opform->ideal_type( globals, registers );
- if( !strcmp(type,"RegFlags") )
+ if( (type != NULL) && !strcmp(type, "RegFlags") )
rematerialize = true;
} else
rematerialize = false; // Do not rematerialize things target stk
@@ -746,14 +746,16 @@
// Expected use is for pointer vs oop determination for LoadP
bool InstructForm::captures_bottom_type(FormDict &globals) const {
if( _matrule && _matrule->_rChild &&
- (!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
- !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
- !strcmp(_matrule->_rChild->_opType,"DecodeN") ||
- !strcmp(_matrule->_rChild->_opType,"EncodeP") ||
- !strcmp(_matrule->_rChild->_opType,"LoadN") ||
- !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
- !strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
- !strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
+ (!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"DecodeN") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodeP") ||
+ !strcmp(_matrule->_rChild->_opType,"DecodeNKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodePKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadN") ||
+ !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
+ !strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;
@@ -793,6 +795,20 @@
return num_opnds;
}
+const char *InstructForm::opnd_ident(int idx) {
+ return _components.at(idx)->_name;
+}
+
+const char *InstructForm::unique_opnd_ident(int idx) {
+ uint i;
+ for (i = 1; i < num_opnds(); ++i) {
+ if (unique_opnds_idx(i) == idx) {
+ break;
+ }
+ }
+ return (_components.at(i) != NULL) ? _components.at(i)->_name : "";
+}
+
// Return count of unmatched operands.
uint InstructForm::num_post_match_opnds() {
uint num_post_match_opnds = _components.count();
@@ -864,6 +880,9 @@
return base;
}
+// This function determines the order of the MachOper in _opnds[]
+// by writing the operand names into the _components list.
+//
// Implementation does not modify state of internal structures
void InstructForm::build_components() {
// Add top-level operands to the components
@@ -959,11 +978,11 @@
// Return zero-based position in component list; -1 if not in list.
int InstructForm::operand_position(const char *name, int usedef) {
- return unique_opnds_idx(_components.operand_position(name, usedef));
+ return unique_opnds_idx(_components.operand_position(name, usedef, this));
}
int InstructForm::operand_position_format(const char *name) {
- return unique_opnds_idx(_components.operand_position_format(name));
+ return unique_opnds_idx(_components.operand_position_format(name, this));
}
// Return zero-based position in component list; -1 if not in list.
@@ -1223,7 +1242,7 @@
if (different) {
globalAD->syntax_err(short_branch->_linenum, "Instruction %s and its short form %s have different parameters\n", _ident, short_branch->_ident);
}
- if (AD._short_branch_debug) {
+ if (AD._adl_debug > 1 || AD._short_branch_debug) {
fprintf(stderr, "Instruction %s has short form %s\n", _ident, short_branch->_ident);
}
_short_branch_form = short_branch;
@@ -1255,16 +1274,19 @@
// Find replacement variable's type
const Form *form = _localNames[rep_var];
if (form == NULL) {
- fprintf(stderr, "unknown replacement variable in format statement: '%s'\n", rep_var);
- assert(false, "ShouldNotReachHere()");
+ globalAD->syntax_err(_linenum, "Unknown replacement variable %s in format statement of %s.",
+ rep_var, _ident);
+ return;
}
OpClassForm *opc = form->is_opclass();
assert( opc, "replacement variable was not found in local names");
// Lookup the index position of the replacement variable
int idx = operand_position_format(rep_var);
if ( idx == -1 ) {
- assert( strcmp(opc->_ident,"label")==0, "Unimplemented");
- assert( false, "ShouldNotReachHere()");
+ globalAD->syntax_err(_linenum, "Could not find replacement variable %s in format statement of %s.\n",
+ rep_var, _ident);
+ assert(strcmp(opc->_ident, "label") == 0, "Unimplemented");
+ return;
}
if (is_noninput_operand(idx)) {
@@ -1273,7 +1295,7 @@
OperandForm* oper = form->is_operand();
if (oper != NULL && oper->is_bound_register()) {
const RegDef* first = oper->get_RegClass()->find_first_elem();
- fprintf(fp, " tty->print(\"%s\");\n", first->_regname);
+ fprintf(fp, " st->print(\"%s\");\n", first->_regname);
} else {
globalAD->syntax_err(_linenum, "In %s can't find format for %s %s", _ident, opc->_ident, rep_var);
}
@@ -1371,26 +1393,28 @@
// idx0=0 is used to indicate that info comes from this same node, not from input edge.
// idx1 starts at oper_input_base()
if ( cur_num_opnds >= 1 ) {
- fprintf(fp," // Start at oper_input_base() and count operands\n");
- fprintf(fp," unsigned %sidx0 = %d;\n", prefix, oper_input_base(globals));
- fprintf(fp," unsigned %sidx1 = %d;\n", prefix, oper_input_base(globals));
+ fprintf(fp," // Start at oper_input_base() and count operands\n");
+ fprintf(fp," unsigned %sidx0 = %d;\n", prefix, oper_input_base(globals));
+ fprintf(fp," unsigned %sidx1 = %d;", prefix, oper_input_base(globals));
+ fprintf(fp," \t// %s\n", unique_opnd_ident(1));
// Generate starting points for other unique operands if they exist
for ( idx = 2; idx < num_unique_opnds(); ++idx ) {
if( *receiver == 0 ) {
- fprintf(fp," unsigned %sidx%d = %sidx%d + opnd_array(%d)->num_edges();\n",
+ fprintf(fp," unsigned %sidx%d = %sidx%d + opnd_array(%d)->num_edges();",
prefix, idx, prefix, idx-1, idx-1 );
} else {
- fprintf(fp," unsigned %sidx%d = %sidx%d + %s_opnds[%d]->num_edges();\n",
+ fprintf(fp," unsigned %sidx%d = %sidx%d + %s_opnds[%d]->num_edges();",
prefix, idx, prefix, idx-1, receiver, idx-1 );
}
+ fprintf(fp," \t// %s\n", unique_opnd_ident(idx));
}
}
if( *receiver != 0 ) {
// This value is used by generate_peepreplace when copying a node.
// Don't emit it in other cases since it can hide bugs with the
// use invalid idx's.
- fprintf(fp," unsigned %sidx%d = %sreq(); \n", prefix, idx, receiver);
+ fprintf(fp," unsigned %sidx%d = %sreq(); \n", prefix, idx, receiver);
}
}
@@ -1774,9 +1798,25 @@
return Component::INVALID;
}
+const char *Component::getUsedefName() {
+ switch (_usedef) {
+ case Component::INVALID: return "INVALID"; break;
+ case Component::USE: return "USE"; break;
+ case Component::USE_DEF: return "USE_DEF"; break;
+ case Component::USE_KILL: return "USE_KILL"; break;
+ case Component::KILL: return "KILL"; break;
+ case Component::TEMP: return "TEMP"; break;
+ case Component::DEF: return "DEF"; break;
+ case Component::CALL: return "CALL"; break;
+ default: assert(false, "unknown effect");
+ }
+ return "Undefined Use/Def info";
+}
+
Effect::Effect(const char *name) : _name(name), _use_def(effect_lookup(name)) {
_ftype = Form::EFF;
}
+
Effect::~Effect() {
}
@@ -2273,7 +2313,7 @@
}
int OperandForm::operand_position(const char *name, int usedef) {
- return _components.operand_position(name, usedef);
+ return _components.operand_position(name, usedef, this);
}
@@ -2399,20 +2439,20 @@
if (_matrule && (_matrule->is_base_register(globals) ||
strcmp(ideal_type(globalAD->globalNames()), "RegFlags") == 0)) {
// !!!!! !!!!!
- fprintf(fp, "{ char reg_str[128];\n");
- fprintf(fp," ra->dump_register(node,reg_str);\n");
- fprintf(fp," tty->print(\"%cs\",reg_str);\n",'%');
- fprintf(fp," }\n");
+ fprintf(fp," { char reg_str[128];\n");
+ fprintf(fp," ra->dump_register(node,reg_str);\n");
+ fprintf(fp," st->print(\"%cs\",reg_str);\n",'%');
+ fprintf(fp," }\n");
} else if (_matrule && (dtype = _matrule->is_base_constant(globals)) != Form::none) {
format_constant( fp, index, dtype );
} else if (ideal_to_sReg_type(_ident) != Form::none) {
// Special format for Stack Slot Register
- fprintf(fp, "{ char reg_str[128];\n");
- fprintf(fp," ra->dump_register(node,reg_str);\n");
- fprintf(fp," tty->print(\"%cs\",reg_str);\n",'%');
- fprintf(fp," }\n");
+ fprintf(fp," { char reg_str[128];\n");
+ fprintf(fp," ra->dump_register(node,reg_str);\n");
+ fprintf(fp," st->print(\"%cs\",reg_str);\n",'%');
+ fprintf(fp," }\n");
} else {
- fprintf(fp,"tty->print(\"No format defined for %s\n\");\n", _ident);
+ fprintf(fp," st->print(\"No format defined for %s\n\");\n", _ident);
fflush(fp);
fprintf(stderr,"No format defined for %s\n", _ident);
dump();
@@ -2426,36 +2466,37 @@
Form::DataType dtype;
if (_matrule && (_matrule->is_base_register(globals) ||
strcmp(ideal_type(globalAD->globalNames()), "RegFlags") == 0)) {
- fprintf(fp, "{ char reg_str[128];\n");
- fprintf(fp," ra->dump_register(node->in(idx");
- if ( index != 0 ) fprintf(fp, "+%d",index);
- fprintf(fp, "),reg_str);\n");
- fprintf(fp," tty->print(\"%cs\",reg_str);\n",'%');
- fprintf(fp," }\n");
+ fprintf(fp," { char reg_str[128];\n");
+ fprintf(fp," ra->dump_register(node->in(idx");
+ if ( index != 0 ) fprintf(fp, "+%d",index);
+ fprintf(fp, "),reg_str);\n");
+ fprintf(fp," st->print(\"%cs\",reg_str);\n",'%');
+ fprintf(fp," }\n");
} else if (_matrule && (dtype = _matrule->is_base_constant(globals)) != Form::none) {
format_constant( fp, index, dtype );
} else if (ideal_to_sReg_type(_ident) != Form::none) {
// Special format for Stack Slot Register
- fprintf(fp, "{ char reg_str[128];\n");
- fprintf(fp," ra->dump_register(node->in(idx");
+ fprintf(fp," { char reg_str[128];\n");
+ fprintf(fp," ra->dump_register(node->in(idx");
if ( index != 0 ) fprintf(fp, "+%d",index);
fprintf(fp, "),reg_str);\n");
- fprintf(fp," tty->print(\"%cs\",reg_str);\n",'%');
- fprintf(fp," }\n");
+ fprintf(fp," st->print(\"%cs\",reg_str);\n",'%');
+ fprintf(fp," }\n");
} else {
- fprintf(fp,"tty->print(\"No format defined for %s\n\");\n", _ident);
+ fprintf(fp," st->print(\"No format defined for %s\n\");\n", _ident);
assert( false,"Internal error:\n output_external_operand() attempting to output other than a Register or Constant");
}
}
void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
switch(const_type) {
- case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
- case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
- case Form::idealN: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
- case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
- case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
- case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealI: fprintf(fp," st->print(\"#%%d\", _c%d);\n", const_index); break;
+ case Form::idealP: fprintf(fp," if (_c%d) _c%d->dump_on(st);\n", const_index, const_index); break;
+ case Form::idealNKlass:
+ case Form::idealN: fprintf(fp," if (_c%d) _c%d->dump_on(st);\n", const_index, const_index); break;
+ case Form::idealL: fprintf(fp," st->print(\"#%%lld\", _c%d);\n", const_index); break;
+ case Form::idealF: fprintf(fp," st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealD: fprintf(fp," st->print(\"#%%f\", _c%d);\n", const_index); break;
default:
assert( false, "ShouldNotReachHere()");
}
@@ -2825,17 +2866,8 @@
fprintf(fp,"Component:"); // Write to output files
fprintf(fp, " name = %s", _name);
fprintf(fp, ", type = %s", _type);
- const char * usedef = "Undefined Use/Def info";
- switch (_usedef) {
- case USE: usedef = "USE"; break;
- case USE_DEF: usedef = "USE_DEF"; break;
- case USE_KILL: usedef = "USE_KILL"; break;
- case KILL: usedef = "KILL"; break;
- case TEMP: usedef = "TEMP"; break;
- case DEF: usedef = "DEF"; break;
- default: assert(false, "unknown effect");
- }
- fprintf(fp, ", use/def = %s\n", usedef);
+ assert(_usedef != 0, "unknown effect");
+ fprintf(fp, ", use/def = %s\n", getUsedefName());
}
@@ -2927,9 +2959,9 @@
return count;
}
-// Return zero-based position in list; -1 if not in list.
+// Return zero-based position of operand 'name' in list; -1 if not in list.
// if parameter 'usedef' is ::USE, it will match USE, USE_DEF, ...
-int ComponentList::operand_position(const char *name, int usedef) {
+int ComponentList::operand_position(const char *name, int usedef, Form *fm) {
PreserveIter pi(this);
int position = 0;
int num_opnds = num_operands();
@@ -2952,10 +2984,18 @@
return position+1;
} else {
if( preceding_non_use && strcmp(component->_name, preceding_non_use->_name) ) {
- fprintf(stderr, "the name '%s' should not precede the name '%s'\n", preceding_non_use->_name, name);
+ fprintf(stderr, "the name '%s(%s)' should not precede the name '%s(%s)'",
+ preceding_non_use->_name, preceding_non_use->getUsedefName(),
+ name, component->getUsedefName());
+ if (fm && fm->is_instruction()) fprintf(stderr, "in form '%s'", fm->is_instruction()->_ident);
+ if (fm && fm->is_operand()) fprintf(stderr, "in form '%s'", fm->is_operand()->_ident);
+ fprintf(stderr, "\n");
}
if( position >= num_opnds ) {
- fprintf(stderr, "the name '%s' is too late in its name list\n", name);
+ fprintf(stderr, "the name '%s' is too late in its name list", name);
+ if (fm && fm->is_instruction()) fprintf(stderr, "in form '%s'", fm->is_instruction()->_ident);
+ if (fm && fm->is_operand()) fprintf(stderr, "in form '%s'", fm->is_operand()->_ident);
+ fprintf(stderr, "\n");
}
assert(position < num_opnds, "advertised index in bounds");
return position;
@@ -3001,10 +3041,10 @@
return Not_in_list;
}
-int ComponentList::operand_position_format(const char *name) {
+int ComponentList::operand_position_format(const char *name, Form *fm) {
PreserveIter pi(this);
int first_position = operand_position(name);
- int use_position = operand_position(name, Component::USE);
+ int use_position = operand_position(name, Component::USE, fm);
return ((first_position < use_position) ? use_position : first_position);
}
@@ -3267,8 +3307,8 @@
// If we are a "Set", start from the right child.
const MatchNode *const mnode = sets_result() ?
- (const MatchNode *const)this->_rChild :
- (const MatchNode *const)this;
+ (const MatchNode *)this->_rChild :
+ (const MatchNode *)this;
// If our right child exists, it is the right reduction
if ( mnode->_rChild ) {
@@ -3285,8 +3325,8 @@
// If we are a "Set", start from the right child.
const MatchNode *const mnode = sets_result() ?
- (const MatchNode *const)this->_rChild :
- (const MatchNode *const)this;
+ (const MatchNode *)this->_rChild :
+ (const MatchNode *)this;
// If our left child exists, it is the left reduction
if ( mnode->_lChild ) {
@@ -3390,9 +3430,9 @@
int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
static const char *needs_ideal_memory_list[] = {
- "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
+ "StoreI","StoreL","StoreP","StoreN","StoreNKlass","StoreD","StoreF" ,
"StoreB","StoreC","Store" ,"StoreFP",
- "LoadI", "LoadUI2L", "LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
+ "LoadI", "LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
"LoadB" , "LoadUB", "LoadUS" ,"LoadS" ,"Load" ,
"StoreVector", "LoadVector",
"LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",
@@ -3947,6 +3987,8 @@
strcmp(opType,"ConvL2I")==0 ||
strcmp(opType,"DecodeN")==0 ||
strcmp(opType,"EncodeP")==0 ||
+ strcmp(opType,"EncodePKlass")==0 ||
+ strcmp(opType,"DecodeNKlass")==0 ||
strcmp(opType,"RoundDouble")==0 ||
strcmp(opType,"RoundFloat")==0 ||
strcmp(opType,"ReverseBytesI")==0 ||
@@ -4108,12 +4150,17 @@
output(stderr);
}
-void MatchRule::output(FILE *fp) {
+// Write just one line.
+void MatchRule::output_short(FILE *fp) {
fprintf(fp,"MatchRule: ( %s",_name);
if (_lChild) _lChild->output(fp);
if (_rChild) _rChild->output(fp);
- fprintf(fp," )\n");
- fprintf(fp," nesting depth = %d\n", _depth);
+ fprintf(fp," )");
+}
+
+void MatchRule::output(FILE *fp) {
+ output_short(fp);
+ fprintf(fp,"\n nesting depth = %d\n", _depth);
if (_result) fprintf(fp," Result Type = %s", _result);
fprintf(fp,"\n");
}
--- a/hotspot/src/share/vm/adlc/formssel.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -76,7 +76,7 @@
private:
bool _ideal_only; // Not a user-defined instruction
// Members used for tracking CISC-spilling
- uint _cisc_spill_operand;// Which operand may cisc-spill
+ int _cisc_spill_operand;// Which operand may cisc-spill
void set_cisc_spill_operand(uint op_index) { _cisc_spill_operand = op_index; }
bool _is_cisc_alternate;
InstructForm *_cisc_spill_alternate;// cisc possible replacement
@@ -103,7 +103,7 @@
RewriteRule *_rewrule; // Rewrite rule for this instruction
FormatRule *_format; // Format for assembly generation
Peephole *_peephole; // List of peephole rules for instruction
- const char *_ins_pipe; // Instruction Scheduline description class
+ const char *_ins_pipe; // Instruction Scheduling description class
uint *_uniq_idx; // Indexes of unique operands
int _uniq_idx_length; // Length of _uniq_idx array
@@ -198,6 +198,7 @@
virtual const char *cost(); // Access ins_cost attribute
virtual uint num_opnds(); // Count of num_opnds for MachNode class
+ // Counts USE_DEF opnds twice. See also num_unique_opnds().
virtual uint num_post_match_opnds();
virtual uint num_consts(FormDict &globals) const;// Constants in match rule
// Constants in match rule with specified type
@@ -228,6 +229,7 @@
// Return number of relocation entries needed for this instruction.
virtual uint reloc(FormDict &globals);
+ const char *opnd_ident(int idx); // Name of operand #idx.
const char *reduce_result();
// Return the name of the operand on the right hand side of the binary match
// Return NULL if there is no right hand side
@@ -240,7 +242,7 @@
// Check if this instruction can cisc-spill to 'alternate'
bool cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
InstructForm *cisc_spill_alternate() { return _cisc_spill_alternate; }
- uint cisc_spill_operand() const { return _cisc_spill_operand; }
+ int cisc_spill_operand() const { return _cisc_spill_operand; }
bool is_cisc_alternate() const { return _is_cisc_alternate; }
void set_cisc_alternate(bool val) { _is_cisc_alternate = val; }
const char *cisc_reg_mask_name() const { return _cisc_reg_mask_name; }
@@ -277,6 +279,7 @@
return idx;
}
}
+ const char *unique_opnd_ident(int idx); // Name of operand at unique idx.
// Operands which are only KILLs aren't part of the input array and
// require special handling in some cases. Their position in this
@@ -889,6 +892,7 @@
void dump(); // Debug printer
void output(FILE *fp); // Write to output files
+ const char* getUsedefName();
public:
// Implementation depends upon working bit intersection and union.
@@ -1030,6 +1034,7 @@
void matchrule_swap_commutative_op(const char* instr_ident, int count, int& match_rules_cnt);
void dump();
+ void output_short(FILE *fp);
void output(FILE *fp);
};
--- a/hotspot/src/share/vm/adlc/main.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/main.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -341,14 +341,20 @@
static void usage(ArchDesc& AD)
{
printf("Architecture Description Language Compiler\n\n");
- printf("Usage: adl [-doqw] [-Dflag[=def]] [-Uflag] [-cFILENAME] [-hFILENAME] [-aDFAFILE] ADLFILE\n");
+ printf("Usage: adlc [-doqwTs] [-#]* [-D<FLAG>[=<DEF>]] [-U<FLAG>] [-c<CPP_FILE_NAME>] [-h<HPP_FILE_NAME>] [-a<DFA_FILE_NAME>] [-v<GLOBALS_FILE_NAME>] <ADL_FILE_NAME>\n");
printf(" d produce DFA debugging info\n");
printf(" o no output produced, syntax and semantic checking only\n");
printf(" q quiet mode, supresses all non-essential messages\n");
printf(" w suppress warning messages\n");
+ printf(" T make DFA as many subroutine calls\n");
+ printf(" s output which instructions are cisc-spillable\n");
+ printf(" D define preprocessor symbol\n");
+ printf(" U undefine preprocessor symbol\n");
printf(" c specify CPP file name (default: %s)\n", AD._CPP_file._name);
printf(" h specify HPP file name (default: %s)\n", AD._HPP_file._name);
printf(" a specify DFA output file name\n");
+ printf(" v specify adGlobals output file name\n");
+ printf(" # increment ADL debug level\n");
printf("\n");
}
@@ -450,22 +456,6 @@
return fname;
}
-//------------------------------strip_path_and_ext------------------------------
-static char *strip_path_and_ext(char *fname)
-{
- char *ep;
- char *sp;
-
- if (fname) {
- for (sp = fname; *sp; sp++)
- if (*sp == '/') fname = sp+1;
- ep = fname; // start at first character and look for '.'
- while (ep <= (fname + strlen(fname) - 1) && *ep != '.') ep++;
- if (*ep == '.') *ep = '\0'; // truncate string at '.'
- }
- return fname;
-}
-
//------------------------------base_plus_suffix-------------------------------
// New concatenated string
static char *base_plus_suffix(const char* base, const char *suffix)
@@ -477,18 +467,6 @@
return fname;
}
-
-//------------------------------prefix_plus_base_plus_suffix-------------------
-// New concatenated string
-static char *prefix_plus_base_plus_suffix(const char* prefix, const char* base, const char *suffix)
-{
- int len = (int)strlen(prefix) + (int)strlen(base) + (int)strlen(suffix) + 1;
-
- char* fname = new char[len];
- sprintf(fname,"%s%s%s",prefix,base,suffix);
- return fname;
-}
-
//------------------------------get_legal_text---------------------------------
// Get pointer to legal text at the beginning of AD file.
// This code assumes that a legal text starts at the beginning of .ad files,
--- a/hotspot/src/share/vm/adlc/output_c.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -518,6 +518,14 @@
int cycles = piperesource->_cycles;
uint stage = pipeline->_stages.index(piperesource->_stage);
+ if (NameList::Not_in_list == stage) {
+ fprintf(stderr,
+ "pipeline_res_mask_initializer: "
+ "semantic error: "
+ "pipeline stage undeclared: %s\n",
+ piperesource->_stage);
+ exit(1);
+ }
uint upper_limit = stage+cycles-1;
uint lower_limit = stage-1;
uint upper_idx = upper_limit >> 5;
@@ -1000,7 +1008,7 @@
}
fprintf(fp_cpp, "};\n\n");
fprintf(fp_cpp, "#ifndef PRODUCT\n");
- fprintf(fp_cpp, "void Bundle::dump() const {\n");
+ fprintf(fp_cpp, "void Bundle::dump(outputStream *st) const {\n");
fprintf(fp_cpp, " static const char * bundle_flags[] = {\n");
fprintf(fp_cpp, " \"\",\n");
fprintf(fp_cpp, " \"use nop delay\",\n");
@@ -1019,22 +1027,22 @@
// See if the same string is in the table
fprintf(fp_cpp, " bool needs_comma = false;\n\n");
fprintf(fp_cpp, " if (_flags) {\n");
- fprintf(fp_cpp, " tty->print(\"%%s\", bundle_flags[_flags]);\n");
+ fprintf(fp_cpp, " st->print(\"%%s\", bundle_flags[_flags]);\n");
fprintf(fp_cpp, " needs_comma = true;\n");
fprintf(fp_cpp, " };\n");
fprintf(fp_cpp, " if (instr_count()) {\n");
- fprintf(fp_cpp, " tty->print(\"%%s%%d instr%%s\", needs_comma ? \", \" : \"\", instr_count(), instr_count() != 1 ? \"s\" : \"\");\n");
+ fprintf(fp_cpp, " st->print(\"%%s%%d instr%%s\", needs_comma ? \", \" : \"\", instr_count(), instr_count() != 1 ? \"s\" : \"\");\n");
fprintf(fp_cpp, " needs_comma = true;\n");
fprintf(fp_cpp, " };\n");
fprintf(fp_cpp, " uint r = resources_used();\n");
fprintf(fp_cpp, " if (r) {\n");
- fprintf(fp_cpp, " tty->print(\"%%sresource%%s:\", needs_comma ? \", \" : \"\", (r & (r-1)) != 0 ? \"s\" : \"\");\n");
+ fprintf(fp_cpp, " st->print(\"%%sresource%%s:\", needs_comma ? \", \" : \"\", (r & (r-1)) != 0 ? \"s\" : \"\");\n");
fprintf(fp_cpp, " for (uint i = 0; i < %d; i++)\n", _pipeline->_rescount);
fprintf(fp_cpp, " if ((r & (1 << i)) != 0)\n");
- fprintf(fp_cpp, " tty->print(\" %%s\", resource_names[i]);\n");
+ fprintf(fp_cpp, " st->print(\" %%s\", resource_names[i]);\n");
fprintf(fp_cpp, " needs_comma = true;\n");
fprintf(fp_cpp, " };\n");
- fprintf(fp_cpp, " tty->print(\"\\n\");\n");
+ fprintf(fp_cpp, " st->print(\"\\n\");\n");
fprintf(fp_cpp, "}\n");
fprintf(fp_cpp, "#endif\n");
}
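Editor's note: the generated Bundle::dump() above now takes an outputStream* and prints through st-> instead of the global tty, so callers can redirect the dump (for example into a buffer for error reports). A generic C++ sketch of the same stream-parameter pattern; the Bundle below is a stand-in, not HotSpot's class:

    #include <iostream>
    #include <sstream>

    struct Bundle {                        // illustrative stand-in only
      int instr_count;
      void dump(std::ostream& st) const {  // the stream is a parameter, no global involved
        st << instr_count << " instr" << (instr_count != 1 ? "s" : "") << '\n';
      }
    };

    int main() {
      const Bundle b{2};
      b.dump(std::cout);        // to the console
      std::ostringstream buf;   // or captured into a buffer
      b.dump(buf);
      return buf.str() == "2 instrs\n" ? 0 : 1;
    }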
@@ -1048,39 +1056,6 @@
node, regMask);
}
-// Scan the peepmatch and output a test for each instruction
-static void check_peepmatch_instruction_tree(FILE *fp, PeepMatch *pmatch, PeepConstraint *pconstraint) {
- int parent = -1;
- int inst_position = 0;
- const char* inst_name = NULL;
- int input = 0;
- fprintf(fp, " // Check instruction sub-tree\n");
- pmatch->reset();
- for( pmatch->next_instruction( parent, inst_position, inst_name, input );
- inst_name != NULL;
- pmatch->next_instruction( parent, inst_position, inst_name, input ) ) {
- // If this is not a placeholder
- if( ! pmatch->is_placeholder() ) {
- // Define temporaries 'inst#', based on parent and parent's input index
- if( parent != -1 ) { // root was initialized
- fprintf(fp, " inst%d = inst%d->in(%d);\n",
- inst_position, parent, input);
- }
-
- // When not the root
- // Test we have the correct instruction by comparing the rule
- if( parent != -1 ) {
- fprintf(fp, " matches = matches && ( inst%d->rule() == %s_rule );",
- inst_position, inst_name);
- }
- } else {
- // Check that user did not try to constrain a placeholder
- assert( ! pconstraint->constrains_instruction(inst_position),
- "fatal(): Can not constrain a placeholder instruction");
- }
- }
-}
-
static void print_block_index(FILE *fp, int inst_position) {
assert( inst_position >= 0, "Instruction number less than zero");
fprintf(fp, "block_index");
@@ -1242,7 +1217,7 @@
if( left_op_index != 0 ) {
assert( (left_index <= 9999) && (left_op_index <= 9999), "exceed string size");
// Must have index into operands
- sprintf(left_reg_index,",inst%d_idx%d", left_index, left_op_index);
+ sprintf(left_reg_index,",inst%d_idx%d", (int)left_index, left_op_index);
} else {
strcpy(left_reg_index, "");
}
@@ -1255,7 +1230,7 @@
if( right_op_index != 0 ) {
assert( (right_index <= 9999) && (right_op_index <= 9999), "exceed string size");
// Must have index into operands
- sprintf(right_reg_index,",inst%d_idx%d", right_index, right_op_index);
+ sprintf(right_reg_index,",inst%d_idx%d", (int)right_index, right_op_index);
} else {
strcpy(right_reg_index, "");
}
@@ -1645,7 +1620,7 @@
new_pos = new_inst->operand_position(parameter,Component::USE);
exp_pos += node->num_opnds();
// If there is no use of the created operand, just skip it
- if (new_pos != -1) {
+ if (new_pos != NameList::Not_in_list) {
//Copy the operand from the original made above
fprintf(fp," n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
cnt, new_pos, exp_pos-node->num_opnds(), opid);
@@ -1789,7 +1764,8 @@
// Build mapping from num_edges to local variables
fprintf(fp," unsigned num0 = 0;\n");
for( i = 1; i < cur_num_opnds; i++ ) {
- fprintf(fp," unsigned num%d = opnd_array(%d)->num_edges();\n",i,i);
+ fprintf(fp," unsigned num%d = opnd_array(%d)->num_edges();",i,i);
+ fprintf(fp, " \t// %s\n", node->opnd_ident(i));
}
// Build a mapping from operand index to input edges
fprintf(fp," unsigned idx0 = oper_input_base();\n");
@@ -1934,6 +1910,7 @@
}
// Track necessary state when identifying a replacement variable
+ // @arg rep_var: The formal parameter of the encoding.
void update_state(const char *rep_var) {
// A replacement variable or one of its subfields
// Obtain replacement variable from list
@@ -1955,7 +1932,7 @@
}
}
else {
- // Lookup its position in parameter list
+ // Lookup its position in (formal) parameter list of encoding
int param_no = _encoding.rep_var_index(rep_var);
if ( param_no == -1 ) {
_AD.syntax_err( _encoding._linenum,
@@ -1964,6 +1941,7 @@
}
// Lookup the corresponding ins_encode parameter
+ // This is the argument (actual parameter) to the encoding.
const char *inst_rep_var = _ins_encode.rep_var_name(_inst, param_no);
if (inst_rep_var == NULL) {
_AD.syntax_err( _ins_encode._linenum,
@@ -2329,6 +2307,7 @@
// Add parameter for index position, if not result operand
if( _operand_idx != 0 ) fprintf(_fp,",idx%d", _operand_idx);
fprintf(_fp,")");
+ fprintf(_fp, "/* %s */", _operand_name);
}
} else {
assert( _reg_status == LITERAL_OUTPUT, "should have output register literal in emit_rep_var");
@@ -2368,7 +2347,7 @@
}
} else {
assert( _constant_status == LITERAL_OUTPUT, "should have output constant literal in emit_rep_var");
- // Cosntant literal has already been sent to output file, nothing more needed
+ // Constant literal has already been sent to output file, nothing more needed
}
}
else if ( strcmp(rep_var,"$disp") == 0 ) {
@@ -2387,6 +2366,8 @@
}
else {
printf("emit_field: %s\n",rep_var);
+ globalAD->syntax_err(_inst._linenum, "Unknown replacement variable %s in format statement of %s.",
+ rep_var, _inst._ident);
assert( false, "UnImplemented()");
}
}
@@ -2484,14 +2465,14 @@
//(1)
// Output instruction's emit prototype
- fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n",
+ fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n",
inst._ident);
- fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
+ fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
//(2)
// Print the size
- fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
+ fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
// (3) and (4)
fprintf(fp,"}\n");
@@ -2579,7 +2560,7 @@
}
// (3) and (4)
- fprintf(fp, "}\n");
+ fprintf(fp, "}\n\n");
}
// defineEvalConstant ---------------------------------------------------------
@@ -2727,12 +2708,12 @@
// (2) }
//
static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
- fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
+ fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
// Check for constants that need to be copied over
const int num_consts = oper.num_consts(globalNames);
const bool is_ideal_bool = oper.is_ideal_bool();
if( (num_consts > 0) ) {
- fprintf(fp," return new (C) %sOper(", oper._ident);
+ fprintf(fp," return new (C) %sOper(", oper._ident);
// generate parameters for constants
int i = 0;
fprintf(fp,"_c%d", i);
@@ -2744,21 +2725,12 @@
}
else {
assert( num_consts == 0, "Currently support zero or one constant per operand clone function");
- fprintf(fp," return new (C) %sOper();\n", oper._ident);
+ fprintf(fp," return new (C) %sOper();\n", oper._ident);
}
// finish method
fprintf(fp,"}\n");
}
-static void define_hash(FILE *fp, char *operand) {
- fprintf(fp,"uint %sOper::hash() const { return 5; }\n", operand);
-}
-
-static void define_cmp(FILE *fp, char *operand) {
- fprintf(fp,"uint %sOper::cmp( const MachOper &oper ) const { return opcode() == oper.opcode(); }\n", operand);
-}
-
-
// Helper functions for bug 4796752, abstracted with minimal modification
// from define_oper_interface()
OperandForm *rep_var_to_operand(const char *encoding, OperandForm &oper, FormDict &globals) {
@@ -2852,14 +2824,14 @@
} else if ( (strcmp(name,"disp") == 0) ) {
fprintf(fp,"(PhaseRegAlloc *ra_, const Node *node, int idx) const { \n");
} else {
- fprintf(fp,"() const { ");
+ fprintf(fp,"() const { \n");
}
// Check for hexadecimal value OR replacement variable
if( *encoding == '$' ) {
// Replacement variable
const char *rep_var = encoding + 1;
- fprintf(fp,"// Replacement variable: %s\n", encoding+1);
+ fprintf(fp," // Replacement variable: %s\n", encoding+1);
// Lookup replacement variable, rep_var, in operand's component list
const Component *comp = oper._components.search(rep_var);
assert( comp != NULL, "Replacement variable not found in components");
@@ -2880,10 +2852,10 @@
} else if ( op->ideal_to_sReg_type(op->_ident) != Form::none ) {
// StackSlot for an sReg comes either from input node or from self, when idx==0
fprintf(fp," if( idx != 0 ) {\n");
- fprintf(fp," // Access register number for input operand\n");
+ fprintf(fp," // Access stack offset (register number) for input operand\n");
fprintf(fp," return ra_->reg2offset(ra_->get_reg_first(node->in(idx)));/* sReg */\n");
fprintf(fp," }\n");
- fprintf(fp," // Access register number from myself\n");
+ fprintf(fp," // Access stack offset (register number) from myself\n");
fprintf(fp," return ra_->reg2offset(ra_->get_reg_first(node));/* sReg */\n");
} else if (op->_matrule && op->_matrule->is_base_constant(globals)) {
// Constant
@@ -2900,7 +2872,7 @@
}
else if( *encoding == '0' && *(encoding+1) == 'x' ) {
// Hex value
- fprintf(fp,"return %s;", encoding);
+ fprintf(fp," return %s;\n", encoding);
} else {
assert( false, "Do not support octal or decimal encode constants");
}
@@ -3133,8 +3105,8 @@
// Output the definition for number of relocation entries
uint reloc_size = instr->reloc(_globalNames);
if ( reloc_size != 0 ) {
- fprintf(fp,"int %sNode::reloc() const {\n", instr->_ident);
- fprintf(fp, " return %d;\n", reloc_size );
+ fprintf(fp,"int %sNode::reloc() const {\n", instr->_ident);
+ fprintf(fp," return %d;\n", reloc_size);
fprintf(fp,"}\n");
fprintf(fp,"\n");
}
@@ -3241,7 +3213,7 @@
class OutputReduceOp : public OutputMap {
public:
OutputReduceOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "reduceOp") {};
void declaration() { fprintf(_hpp, "extern const int reduceOp[];\n"); }
void definition() { fprintf(_cpp, "const int reduceOp[] = {\n"); }
@@ -3276,7 +3248,7 @@
class OutputLeftOp : public OutputMap {
public:
OutputLeftOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "leftOp") {};
void declaration() { fprintf(_hpp, "extern const int leftOp[];\n"); }
void definition() { fprintf(_cpp, "const int leftOp[] = {\n"); }
@@ -3306,7 +3278,7 @@
class OutputRightOp : public OutputMap {
public:
OutputRightOp(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "rightOp") {};
void declaration() { fprintf(_hpp, "extern const int rightOp[];\n"); }
void definition() { fprintf(_cpp, "const int rightOp[] = {\n"); }
@@ -3336,11 +3308,11 @@
class OutputRuleName : public OutputMap {
public:
OutputRuleName(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "ruleName") {};
void declaration() { fprintf(_hpp, "extern const char *ruleName[];\n"); }
void definition() { fprintf(_cpp, "const char *ruleName[] = {\n"); }
- void closing() { fprintf(_cpp, " \"no trailing comma\"\n");
+ void closing() { fprintf(_cpp, " \"invalid rule name\" // no trailing comma\n");
OutputMap::closing();
}
void map(OpClassForm &opc) { fprintf(_cpp, " \"%s\"", _AD.machOperEnum(opc._ident) ); }
@@ -3354,7 +3326,7 @@
class OutputSwallowed : public OutputMap {
public:
OutputSwallowed(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "swallowed") {};
void declaration() { fprintf(_hpp, "extern const bool swallowed[];\n"); }
void definition() { fprintf(_cpp, "const bool swallowed[] = {\n"); }
@@ -3375,7 +3347,7 @@
class OutputInstChainRule : public OutputMap {
public:
OutputInstChainRule(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "instruction_chain_rule") {};
void declaration() { fprintf(_hpp, "extern const bool instruction_chain_rule[];\n"); }
void definition() { fprintf(_cpp, "const bool instruction_chain_rule[] = {\n"); }
@@ -3416,7 +3388,7 @@
if ( op->ideal_only() ) continue;
// Generate the entry for this opcode
- map.map(*op); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*op); fprintf(fp_cpp, ",\n");
++idx;
};
fprintf(fp_cpp, " // last operand\n");
@@ -3425,7 +3397,7 @@
map.record_position(OutputMap::BEGIN_OPCLASSES, idx );
_opclass.reset();
for(; (opc = (OpClassForm*)_opclass.iter()) != NULL; ) {
- map.map(*opc); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*opc); fprintf(fp_cpp, ",\n");
++idx;
};
fprintf(fp_cpp, " // last operand class\n");
@@ -3435,7 +3407,7 @@
_internalOpNames.reset();
char *name = NULL;
for(; (name = (char *)_internalOpNames.iter()) != NULL; ) {
- map.map(name); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(name); fprintf(fp_cpp, ",\n");
++idx;
};
fprintf(fp_cpp, " // last internally defined operand\n");
@@ -3453,7 +3425,7 @@
if ( ! inst->is_simple_chain_rule(_globalNames) ) continue;
if ( inst->rematerialize(_globalNames, get_registers()) ) continue;
- map.map(*inst); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*inst); fprintf(fp_cpp, ",\n");
++idx;
};
map.record_position(OutputMap::BEGIN_REMATERIALIZE, idx );
@@ -3464,7 +3436,7 @@
if ( ! inst->is_simple_chain_rule(_globalNames) ) continue;
if ( ! inst->rematerialize(_globalNames, get_registers()) ) continue;
- map.map(*inst); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*inst); fprintf(fp_cpp, ",\n");
++idx;
};
map.record_position(OutputMap::END_INST_CHAIN_RULES, idx );
@@ -3478,7 +3450,7 @@
if ( inst->is_simple_chain_rule(_globalNames) ) continue;
if ( ! inst->rematerialize(_globalNames, get_registers()) ) continue;
- map.map(*inst); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*inst); fprintf(fp_cpp, ",\n");
++idx;
};
map.record_position(OutputMap::END_REMATERIALIZE, idx );
@@ -3489,7 +3461,7 @@
if ( inst->is_simple_chain_rule(_globalNames) ) continue;
if ( inst->rematerialize(_globalNames, get_registers()) ) continue;
- map.map(*inst); fprintf(fp_cpp, ", // %d\n", idx);
+ fprintf(fp_cpp, " /* %4d */", idx); map.map(*inst); fprintf(fp_cpp, ",\n");
++idx;
};
}
@@ -3571,7 +3543,7 @@
next = _register->iter_RegDefs();
char policy = reg_save_policy(rdef->_callconv);
const char *comma = (next != NULL) ? "," : " // no trailing comma";
- fprintf(fp_cpp, " '%c'%s\n", policy, comma);
+ fprintf(fp_cpp, " '%c'%s // %s\n", policy, comma, rdef->_regname);
}
fprintf(fp_cpp, "};\n\n");
@@ -3583,7 +3555,7 @@
next = _register->iter_RegDefs();
char policy = reg_save_policy(rdef->_c_conv);
const char *comma = (next != NULL) ? "," : " // no trailing comma";
- fprintf(fp_cpp, " '%c'%s\n", policy, comma);
+ fprintf(fp_cpp, " '%c'%s // %s\n", policy, comma, rdef->_regname);
}
fprintf(fp_cpp, "};\n\n");
@@ -3644,6 +3616,8 @@
fprintf(fp, "_leaf->bottom_type()->is_ptr()");
} else if ( (strcmp(optype,"ConN") == 0) ) {
fprintf(fp, "_leaf->bottom_type()->is_narrowoop()");
+ } else if ( (strcmp(optype,"ConNKlass") == 0) ) {
+ fprintf(fp, "_leaf->bottom_type()->is_narrowklass()");
} else if ( (strcmp(optype,"ConF") == 0) ) {
fprintf(fp, "_leaf->getf()");
} else if ( (strcmp(optype,"ConD") == 0) ) {
@@ -3792,7 +3766,7 @@
// For each operand not in the match rule, call MachOperGenerator
// with the enum for the opcode that needs to be built.
ComponentList clist = inst->_components;
- int index = clist.operand_position(comp->_name, comp->_usedef);
+ int index = clist.operand_position(comp->_name, comp->_usedef, inst);
const char *opcode = machOperEnum(comp->_type);
fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
@@ -3987,7 +3961,7 @@
fprintf(fp_cpp, " case %s_rule:", opClass);
// Start local scope
- fprintf(fp_cpp, " {\n");
+ fprintf(fp_cpp, " {\n");
// Generate code to construct the new MachNode
buildMachNode(fp_cpp, inst, " ");
// Return result and exit scope
@@ -4137,6 +4111,9 @@
// Get info for the CISC_oracle and MachNode::cisc_version()
void ArchDesc::identify_cisc_spill_instructions() {
+ if (_frame == NULL)
+ return;
+
// Find the user-defined operand for cisc-spilling
if( _frame->_cisc_spilling_operand_name != NULL ) {
const Form *form = _globalNames[_frame->_cisc_spilling_operand_name];
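A minimal, hypothetical sketch (names and entries are illustrative, not from this patch) of the table-emission style the OutputMap hunks in output_c.cpp switch to: the element index moves into a leading /* idx */ comment, every real entry keeps its comma, and a fixed sentinel carries the "no trailing comma" note, so the generated arrays stay aligned and greppable.

#include <cstdio>

// Emit a C array the way the generator now does: index in a comment, comma on
// every real entry, and an explicit sentinel with no trailing comma.
static void emit_rule_names(FILE* fp, const char* const* names, int count) {
  std::fprintf(fp, "const char *ruleName[] = {\n");
  for (int i = 0; i < count; i++) {
    std::fprintf(fp, "  /* %4d */ \"%s\",\n", i, names[i]);
  }
  std::fprintf(fp, "  \"invalid rule name\" // no trailing comma\n};\n");
}

int main() {
  const char* names[] = { "loadI_rule", "addI_rule", "storeI_rule" };  // made-up rule names
  emit_rule_names(stdout, names, 3);
  return 0;
}

Putting the comma on every generated entry and closing with a fixed sentinel means entries can be added or reordered without touching their neighbours.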
--- a/hotspot/src/share/vm/adlc/output_h.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/adlc/output_h.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -25,6 +25,8 @@
// output_h.cpp - Class HPP file output routines for architecture definition
#include "adlc.hpp"
+// The comment delimiter used in format statements after assembler instructions.
+#define commentSeperator "!"
// Generate the #define that describes the number of registers.
static void defineRegCount(FILE *fp, RegisterForm *registers) {
@@ -79,10 +81,15 @@
_register->reset_RegDefs();
int i = 0;
while( (reg_def = _register->iter_RegDefs()) != NULL ) {
- fprintf(fp_hpp," %s_num,\t\t// %d\n", reg_def->_regname, i++);
+ fprintf(fp_hpp," %s_num,", reg_def->_regname);
+ for (int j = 0; j < 20-(int)strlen(reg_def->_regname); j++) fprintf(fp_hpp, " ");
+ fprintf(fp_hpp," // enum %3d, regnum %3d, reg encode %3s\n",
+ i++,
+ reg_def->register_num(),
+ reg_def->register_encode());
}
// Finish defining enumeration
- fprintf(fp_hpp, " _last_Mach_Reg\t// %d\n", i);
+ fprintf(fp_hpp, " _last_Mach_Reg // %d\n", i);
fprintf(fp_hpp, "};\n");
}
@@ -121,13 +128,24 @@
fprintf(fp_hpp, "// in the order of occurrence in the alloc_class(es).\n");
fprintf(fp_hpp, "enum MachRegisterEncodes {\n");
+ // Find max enum string length.
+ size_t maxlen = 0;
+ _register->reset_RegDefs();
+ reg_def = _register->iter_RegDefs();
+ while (reg_def != NULL) {
+ size_t len = strlen(reg_def->_regname);
+ if (len > maxlen) maxlen = len;
+ reg_def = _register->iter_RegDefs();
+ }
+
// Output the register encoding for each register in the allocation classes
_register->reset_RegDefs();
reg_def_next = _register->iter_RegDefs();
while( (reg_def = reg_def_next) != NULL ) {
reg_def_next = _register->iter_RegDefs();
- fprintf(fp_hpp," %s_enc = %s%s\n",
- reg_def->_regname, reg_def->register_encode(), reg_def_next == NULL? "" : "," );
+ fprintf(fp_hpp," %s_enc", reg_def->_regname);
+ for (size_t i = strlen(reg_def->_regname); i < maxlen; i++) fprintf(fp_hpp, " ");
+ fprintf(fp_hpp," = %3s%s\n", reg_def->register_encode(), reg_def_next == NULL? "" : "," );
}
// Finish defining enumeration
fprintf(fp_hpp, "};\n");
@@ -177,14 +195,6 @@
fprintf(fp," virtual const RegMask *in_RegMask(int index) const;\n");
}
-static void declare_hash(FILE *fp) {
- fprintf(fp," virtual uint hash() const;\n");
-}
-
-static void declare_cmp(FILE *fp) {
- fprintf(fp," virtual uint cmp( const MachOper &oper ) const;\n");
-}
-
static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper) {
int i = 0;
Component *comp;
@@ -207,6 +217,10 @@
if (i > 0) fprintf(fp,", ");
fprintf(fp," const TypeNarrowOop *_c%d;\n", i);
}
+ else if (!strcmp(type, "ConNKlass")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp," const TypeNarrowKlass *_c%d;\n", i);
+ }
else if (!strcmp(type, "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jlong _c%d;\n", i);
@@ -243,6 +257,10 @@
fprintf(fp," const TypePtr *_c%d;\n", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConNKlass")) {
+ fprintf(fp," const TypePtr *_c%d;\n", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
fprintf(fp," jlong _c%d;\n", i);
i++;
@@ -288,11 +306,12 @@
fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
break;
}
- case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
- case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
- case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
- case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
- case Form::idealD : { fprintf(fp,"jdouble c%d", i); break; }
+ case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
+ case Form::idealNKlass : { fprintf(fp,"const TypeNarrowKlass *c%d", i); break; }
+ case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
+ case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
+ case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
+ case Form::idealD : { fprintf(fp,"jdouble c%d", i); break; }
default:
assert(!is_ideal_bool, "Non-constant operand lacks component list.");
break;
@@ -316,6 +335,11 @@
fprintf(fp,"const TypePtr *c%d", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConNKlass")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp,"const TypePtr *c%d", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jlong c%d", i);
@@ -358,18 +382,19 @@
static void defineCCodeDump(OperandForm* oper, FILE *fp, int i) {
assert(oper != NULL, "what");
CondInterface* cond = oper->_interface->is_CondInterface();
- fprintf(fp, " if( _c%d == BoolTest::eq ) st->print(\"%s\");\n",i,cond->_equal_format);
- fprintf(fp, " else if( _c%d == BoolTest::ne ) st->print(\"%s\");\n",i,cond->_not_equal_format);
- fprintf(fp, " else if( _c%d == BoolTest::le ) st->print(\"%s\");\n",i,cond->_less_equal_format);
- fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
- fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
- fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
+ fprintf(fp, " if( _c%d == BoolTest::eq ) st->print(\"%s\");\n",i,cond->_equal_format);
+ fprintf(fp, " else if( _c%d == BoolTest::ne ) st->print(\"%s\");\n",i,cond->_not_equal_format);
+ fprintf(fp, " else if( _c%d == BoolTest::le ) st->print(\"%s\");\n",i,cond->_less_equal_format);
+ fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
+ fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
+ fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
}
// Output code that dumps constant values, increment "i" if type is constant
static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i, OperandForm* oper) {
if (!strcmp(ideal_type, "ConI")) {
fprintf(fp," st->print(\"#%%d\", _c%d);\n", i);
+ fprintf(fp," st->print(\"/0x%%08x\", _c%d);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConP")) {
@@ -380,16 +405,25 @@
fprintf(fp," _c%d->dump_on(st);\n", i);
++i;
}
+ else if (!strcmp(ideal_type, "ConNKlass")) {
+ fprintf(fp," _c%d->dump_on(st);\n", i);
+ ++i;
+ }
else if (!strcmp(ideal_type, "ConL")) {
fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
+ fprintf(fp," st->print(\"/\" PTR64_FORMAT, _c%d);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConF")) {
fprintf(fp," st->print(\"#%%f\", _c%d);\n", i);
+ fprintf(fp," jint _c%di = JavaValue(_c%d).get_jint();\n", i, i);
+ fprintf(fp," st->print(\"/0x%%x/\", _c%di);\n", i);
++i;
}
else if (!strcmp(ideal_type, "ConD")) {
fprintf(fp," st->print(\"#%%f\", _c%d);\n", i);
+ fprintf(fp," jlong _c%dl = JavaValue(_c%d).get_jlong();\n", i, i);
+ fprintf(fp," st->print(\"/\" PTR64_FORMAT, _c%dl);\n", i);
++i;
}
else if (!strcmp(ideal_type, "Bool")) {
@@ -411,7 +445,7 @@
}
// Local pointer indicates remaining part of format rule
- uint idx = 0; // position of operand in match rule
+ int idx = 0; // position of operand in match rule
// Generate internal format function, used when stored locally
fprintf(fp, "\n#ifndef PRODUCT\n");
@@ -426,13 +460,12 @@
oper._format->_rep_vars.reset();
oper._format->_strings.reset();
while ( (string = oper._format->_strings.iter()) != NULL ) {
- fprintf(fp," ");
// Check if this is a standard string or a replacement variable
if ( string != NameList::_signal ) {
// Normal string
// Pass through to st->print
- fprintf(fp,"st->print(\"%s\");\n", string);
+ fprintf(fp," st->print(\"%s\");\n", string);
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
@@ -455,7 +488,7 @@
}
// output invocation of "$..."s format function
- if ( op != NULL ) op->int_format(fp, globals, idx);
+ if ( op != NULL ) op->int_format(fp, globals, idx);
if ( idx == -1 ) {
fprintf(stderr,
@@ -498,13 +531,12 @@
oper._format->_rep_vars.reset();
oper._format->_strings.reset();
while ( (string = oper._format->_strings.iter()) != NULL ) {
- fprintf(fp," ");
// Check if this is a standard string or a replacement variable
if ( string != NameList::_signal ) {
// Normal string
// Pass through to st->print
- fprintf(fp,"st->print(\"%s\");\n", string);
+ fprintf(fp," st->print(\"%s\");\n", string);
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
@@ -529,7 +561,7 @@
if ( op != NULL ) op->ext_format(fp, globals, idx);
// Lookup the index position of the replacement variable
- idx = oper._components.operand_position_format(rep_var);
+ idx = oper._components.operand_position_format(rep_var, &oper);
if ( idx == -1 ) {
fprintf(stderr,
"Using a name, %s, that isn't in match rule\n", rep_var);
@@ -583,7 +615,7 @@
inst._format->_rep_vars.reset();
inst._format->_strings.reset();
while( (string = inst._format->_strings.iter()) != NULL ) {
- fprintf(fp," ");
+ fprintf(fp," ");
// Check if this is a standard string or a replacement variable
if( string == NameList::_signal ) { // Replacement variable
const char* rep_var = inst._format->_rep_vars.iter();
@@ -640,11 +672,12 @@
if( call_type != Form::invalid_type ) {
switch( call_type ) {
case Form::JAVA_DYNAMIC:
- fprintf(fp," _method->print_short_name();\n");
+ fprintf(fp," _method->print_short_name(st);\n");
break;
case Form::JAVA_STATIC:
- fprintf(fp," if( _method ) _method->print_short_name(st); else st->print(\" wrapper for: %%s\", _name);\n");
- fprintf(fp," if( !_method ) dump_trap_args(st);\n");
+ fprintf(fp," if( _method ) _method->print_short_name(st);\n");
+ fprintf(fp," else st->print(\" wrapper for: %%s\", _name);\n");
+ fprintf(fp," if( !_method ) dump_trap_args(st);\n");
break;
case Form::JAVA_COMPILED:
case Form::JAVA_INTERP:
@@ -652,52 +685,46 @@
case Form::JAVA_RUNTIME:
case Form::JAVA_LEAF:
case Form::JAVA_NATIVE:
- fprintf(fp," st->print(\" %%s\", _name);");
+ fprintf(fp," st->print(\" %%s\", _name);");
break;
default:
- assert(0,"ShouldNotReacHere");
+ assert(0,"ShouldNotReachHere");
}
- fprintf(fp, " st->print_cr(\"\");\n" );
- fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
- fprintf(fp, " st->print(\" # \");\n" );
- fprintf(fp, " if( _jvms ) _oop_map->print_on(st);\n");
+ fprintf(fp, " st->print_cr(\"\");\n" );
+ fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
+ fprintf(fp, " st->print(\" # \");\n" );
+ fprintf(fp, " if( _jvms && _oop_map ) _oop_map->print_on(st);\n");
}
else if(inst.is_ideal_safepoint()) {
- fprintf(fp, " st->print(\"\");\n" );
- fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
- fprintf(fp, " st->print(\" # \");\n" );
- fprintf(fp, " if( _jvms ) _oop_map->print_on(st);\n");
+ fprintf(fp, " st->print(\"\");\n" );
+ fprintf(fp, " if (_jvms) _jvms->format(ra, this, st); else st->print_cr(\" No JVM State Info\");\n" );
+ fprintf(fp, " st->print(\" # \");\n" );
+ fprintf(fp, " if( _jvms && _oop_map ) _oop_map->print_on(st);\n");
}
else if( inst.is_ideal_if() ) {
- fprintf(fp, " st->print(\" P=%%f C=%%f\",_prob,_fcnt);\n" );
+ fprintf(fp, " st->print(\" P=%%f C=%%f\",_prob,_fcnt);\n" );
}
else if( inst.is_ideal_mem() ) {
// Print out the field name if available to improve readability
- fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n");
- fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n");
- fprintf(fp, " st->print(\" ! Field: \");\n");
- fprintf(fp, " if (f->is_volatile())\n");
- fprintf(fp, " st->print(\"volatile \");\n");
- fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n");
- fprintf(fp, " st->print(\".\");\n");
- fprintf(fp, " f->name()->print_symbol_on(st);\n");
- fprintf(fp, " if (f->is_constant())\n");
- fprintf(fp, " st->print(\" (constant)\");\n");
- fprintf(fp, " } else\n");
+ fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n");
+ fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n");
+ fprintf(fp, " st->print(\" %s Field: \");\n", commentSeperator);
+ fprintf(fp, " if (f->is_volatile())\n");
+ fprintf(fp, " st->print(\"volatile \");\n");
+ fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n");
+ fprintf(fp, " st->print(\".\");\n");
+ fprintf(fp, " f->name()->print_symbol_on(st);\n");
+ fprintf(fp, " if (f->is_constant())\n");
+ fprintf(fp, " st->print(\" (constant)\");\n");
+ fprintf(fp, " } else {\n");
// Make sure 'Volatile' gets printed out
fprintf(fp, " if (ra->C->alias_type(adr_type())->is_volatile())\n");
fprintf(fp, " st->print(\" volatile!\");\n");
+ fprintf(fp, " }\n");
}
// Complete the definition of the format function
- fprintf(fp, " }\n#endif\n");
-}
-
-static bool is_non_constant(char* x) {
- // Tells whether the string (part of an operator interface) is non-constant.
- // Simply detect whether there is an occurrence of a formal parameter,
- // which will always begin with '$'.
- return strchr(x, '$') == 0;
+ fprintf(fp, "}\n#endif\n");
}
void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
@@ -1089,7 +1116,7 @@
fprintf(fp_hpp, " static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, "#ifndef PRODUCT\n");
- fprintf(fp_hpp, " void dump() const;\n");
+ fprintf(fp_hpp, " void dump(outputStream *st = tty) const;\n");
fprintf(fp_hpp, "#endif\n");
fprintf(fp_hpp, "};\n\n");
@@ -1234,12 +1261,12 @@
unsigned int position = 0;
const char *opret, *opname, *optype;
oper->_matrule->base_operand(position,_globalNames,opret,opname,optype);
- fprintf(fp," virtual const Type *type() const {");
+ fprintf(fp," virtual const Type *type() const {");
const char *type = getIdealType(optype);
if( type != NULL ) {
Form::DataType data_type = oper->is_base_constant(_globalNames);
// Check if we are an ideal pointer type
- if( data_type == Form::idealP || data_type == Form::idealN ) {
+ if( data_type == Form::idealP || data_type == Form::idealN || data_type == Form::idealNKlass ) {
// Return the ideal type we already have: <TypePtr *>
fprintf(fp," return _c0;");
} else {
@@ -1377,6 +1404,16 @@
fprintf(fp, " return _c0->get_ptrtype()->reloc();");
fprintf(fp, " }\n");
}
+ else if (!strcmp(oper->ideal_type(_globalNames), "ConNKlass")) {
+ // Access the locally stored constant
+ fprintf(fp," virtual intptr_t constant() const {");
+ fprintf(fp, " return _c0->get_ptrtype()->get_con();");
+ fprintf(fp, " }\n");
+ // Generate query to determine if this pointer is an oop
+ fprintf(fp," virtual relocInfo::relocType constant_reloc() const {");
+ fprintf(fp, " return _c0->get_ptrtype()->reloc();");
+ fprintf(fp, " }\n");
+ }
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
fprintf(fp," virtual intptr_t constant() const {");
// We don't support addressing modes with > 4Gig offsets.
@@ -1503,12 +1540,19 @@
fprintf(fp, " GrowableArray<Label*> _index2label;\n");
}
fprintf(fp,"public:\n");
- fprintf(fp," MachOper *opnd_array(uint operand_index) const { assert(operand_index < _num_opnds, \"invalid _opnd_array index\"); return _opnd_array[operand_index]; }\n");
- fprintf(fp," void set_opnd_array(uint operand_index, MachOper *operand) { assert(operand_index < _num_opnds, \"invalid _opnd_array index\"); _opnd_array[operand_index] = operand; }\n");
+ fprintf(fp," MachOper *opnd_array(uint operand_index) const {\n");
+ fprintf(fp," assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
+ fprintf(fp," return _opnd_array[operand_index];\n");
+ fprintf(fp," }\n");
+ fprintf(fp," void set_opnd_array(uint operand_index, MachOper *operand) {\n");
+ fprintf(fp," assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
+ fprintf(fp," _opnd_array[operand_index] = operand;\n");
+ fprintf(fp," }\n");
fprintf(fp,"private:\n");
if ( instr->is_ideal_jump() ) {
fprintf(fp," virtual void add_case_label(int index_num, Label* blockLabel) {\n");
- fprintf(fp," _index2label.at_put_grow(index_num, blockLabel);}\n");
+ fprintf(fp," _index2label.at_put_grow(index_num, blockLabel);\n");
+ fprintf(fp," }\n");
}
if( can_cisc_spill() && (instr->cisc_spill_alternate() != NULL) ) {
fprintf(fp," const RegMask *_cisc_RegMask;\n");
@@ -1544,7 +1588,7 @@
while (attr != NULL) {
if (strcmp(attr->_ident,"ins_cost") &&
strcmp(attr->_ident,"ins_short_branch")) {
- fprintf(fp," int %s() const { return %s; }\n",
+ fprintf(fp," int %s() const { return %s; }\n",
attr->_ident, attr->_val);
}
// Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
@@ -1628,12 +1672,12 @@
// Output the declaration for number of relocation entries
if ( instr->reloc(_globalNames) != 0 ) {
- fprintf(fp," virtual int reloc() const;\n");
+ fprintf(fp," virtual int reloc() const;\n");
}
if (instr->alignment() != 1) {
- fprintf(fp," virtual int alignment_required() const { return %d; }\n", instr->alignment());
- fprintf(fp," virtual int compute_padding(int current_offset) const;\n");
+ fprintf(fp," virtual int alignment_required() const { return %d; }\n", instr->alignment());
+ fprintf(fp," virtual int compute_padding(int current_offset) const;\n");
}
// Starting point for inputs matcher wants.
@@ -1803,13 +1847,14 @@
// as is done for pointers
//
// Construct appropriate constant type containing the constant value.
- fprintf(fp," virtual const class Type *bottom_type() const{\n");
+ fprintf(fp," virtual const class Type *bottom_type() const {\n");
switch( data_type ) {
case Form::idealI:
fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
break;
case Form::idealP:
case Form::idealN:
+ case Form::idealNKlass:
fprintf(fp," return opnd_array(1)->type();\n");
break;
case Form::idealD:
@@ -1833,7 +1878,7 @@
// !!!!! !!!!!
// Provide explicit bottom type for conversions to int
// On Intel the result operand is a stackSlot, untyped.
- fprintf(fp," virtual const class Type *bottom_type() const{");
+ fprintf(fp," virtual const class Type *bottom_type() const {");
fprintf(fp, " return TypeInt::INT;");
fprintf(fp, " };\n");
}*/
@@ -1854,7 +1899,7 @@
// BoxNode provides the address of a stack slot.
// Define its bottom type to be TypeRawPtr::BOTTOM instead of TypePtr::BOTTOM
  // This prevents insert_anti_dependencies from complaining. It will
- // complain if it see that the pointer base is TypePtr::BOTTOM since
+ // complain if it sees that the pointer base is TypePtr::BOTTOM since
// it doesn't understand what that might alias.
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // Box?\n");
}
@@ -2017,7 +2062,7 @@
class OutputMachOperands : public OutputMap {
public:
OutputMachOperands(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD) {};
+ : OutputMap(hpp, cpp, globals, AD, "MachOperands") {};
void declaration() { }
void definition() { fprintf(_cpp, "enum MachOperands {\n"); }
@@ -2052,7 +2097,7 @@
int end_instructions;
public:
OutputMachOpcodes(FILE *hpp, FILE *cpp, FormDict &globals, ArchDesc &AD)
- : OutputMap(hpp, cpp, globals, AD),
+ : OutputMap(hpp, cpp, globals, AD, "MachOpcodes"),
begin_inst_chain_rule(-1), end_inst_chain_rule(-1), end_instructions(-1)
{};
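For the MachRegisterEncodes hunk in output_h.cpp above, here is a small self-contained sketch (demo register names, not HotSpot's) of the two-pass alignment it introduces: scan once to find the longest register name, then pad every emitted entry to that width so the '=' columns line up in the generated header.

#include <cstdio>
#include <cstring>

static void emit_aligned_enum(FILE* fp, const char* const* names,
                              const int* encodings, int count) {
  size_t maxlen = 0;                                   // pass 1: longest name
  for (int i = 0; i < count; i++) {
    size_t len = std::strlen(names[i]);
    if (len > maxlen) maxlen = len;
  }
  std::fprintf(fp, "enum DemoRegisterEncodes {\n");
  for (int i = 0; i < count; i++) {                    // pass 2: pad and emit
    std::fprintf(fp, "  %s_enc", names[i]);
    for (size_t j = std::strlen(names[i]); j < maxlen; j++) std::fprintf(fp, " ");
    std::fprintf(fp, " = %3d%s\n", encodings[i], (i + 1 < count) ? "," : "");
  }
  std::fprintf(fp, "};\n");
}

int main() {
  const char* names[] = { "RAX", "RBX", "R10" };       // illustrative registers
  const int   encs[]  = { 0, 3, 10 };
  emit_aligned_enum(stdout, names, encs, 3);
  return 0;
}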
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -32,6 +32,7 @@
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "code/debugInfoRec.hpp"
+#include "compiler/compileLog.hpp"
typedef enum {
@@ -67,10 +68,25 @@
class PhaseTraceTime: public TraceTime {
private:
JavaThread* _thread;
+ CompileLog* _log;
public:
- PhaseTraceTime(TimerName timer):
- TraceTime("", &timers[timer], CITime || CITimeEach, Verbose) {
+ PhaseTraceTime(TimerName timer)
+ : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+ if (Compilation::current() != NULL) {
+ _log = Compilation::current()->log();
+ }
+
+ if (_log != NULL) {
+ _log->begin_head("phase name='%s'", timer_name[timer]);
+ _log->stamp();
+ _log->end_head();
+ }
+ }
+
+ ~PhaseTraceTime() {
+ if (_log != NULL)
+ _log->done("phase");
}
};
@@ -390,6 +406,10 @@
PhaseTraceTime timeit(_t_codeinstall);
install_code(frame_size);
}
+
+ if (log() != NULL) // Print code cache state into compiler log
+ log()->code_cache_state();
+
totalInstructionNodes += Instruction::number_of_instructions();
}
@@ -456,6 +476,7 @@
int osr_bci, BufferBlob* buffer_blob)
: _compiler(compiler)
, _env(env)
+, _log(env->log())
, _method(method)
, _osr_bci(osr_bci)
, _hir(NULL)
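The PhaseTraceTime change above wraps each compiler phase in an object whose constructor opens a phase element in the compile log and whose destructor closes it. A stripped-down sketch of that RAII pattern, with invented names (SimpleLog, ScopedPhase) standing in for CompileLog and PhaseTraceTime:

#include <cstdio>

struct SimpleLog {
  void begin(const char* name) { std::printf("<phase name='%s'>\n", name); }
  void done()                  { std::printf("</phase>\n"); }
};

class ScopedPhase {
  SimpleLog* _log;
 public:
  ScopedPhase(SimpleLog* log, const char* name) : _log(log) {
    if (_log != nullptr) _log->begin(name);   // mirrors begin_head()/stamp()/end_head()
  }
  ~ScopedPhase() {
    if (_log != nullptr) _log->done();        // mirrors _log->done("phase")
  }
};

int main() {
  SimpleLog log;
  { ScopedPhase p(&log, "buildIR");  /* ... phase work ... */ }
  { ScopedPhase p(&log, "codeemit"); /* ... phase work ... */ }
  return 0;
}

Because the closing element is emitted from the destructor, the log stays well-formed even when a phase returns early.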
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -66,6 +66,7 @@
int _next_block_id;
AbstractCompiler* _compiler;
ciEnv* _env;
+ CompileLog* _log;
ciMethod* _method;
int _osr_bci;
IR* _hir;
@@ -123,6 +124,7 @@
// accessors
ciEnv* env() const { return _env; }
+ CompileLog* log() const { return _log; }
AbstractCompiler* compiler() const { return _compiler; }
bool has_exception_handlers() const { return _has_exception_handlers; }
bool has_fpu_code() const { return _has_fpu_code; }
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1682,6 +1682,12 @@
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
ciInstanceKlass* actual_recv = callee_holder;
+ CompileLog* log = compilation()->log();
+ if (log != NULL)
+ log->elem("call method='%d' instr='%s'",
+ log->identify(target),
+ Bytecodes::name(code));
+
// Some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial or invokestatic.
if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) {
@@ -1826,6 +1832,7 @@
}
code = Bytecodes::_invokespecial;
}
+
// check if we could do inlining
if (!PatchALot && Inline && klass->is_loaded() &&
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
@@ -2448,6 +2455,7 @@
#endif
_skip_block = false;
assert(state() != NULL, "ValueStack missing!");
+ CompileLog* log = compilation()->log();
ciBytecodeStream s(method());
s.reset_to_bci(bci);
int prev_bci = bci;
@@ -2466,6 +2474,9 @@
(block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
+ if (log != NULL)
+ log->set_context("bc code='%d' bci='%d'", (int)code, s.cur_bci());
+
// Check for active jsr during OSR compilation
if (compilation()->is_osr_compile()
&& scope()->is_top_scope()
@@ -2686,8 +2697,13 @@
case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint", NULL);
default : ShouldNotReachHere(); break;
}
+
+ if (log != NULL)
+ log->clear_context(); // skip marker if nothing was printed
+
// save current bci to setup Goto at the end
prev_bci = s.cur_bci();
+
}
CHECK_BAILOUT_(NULL);
// stop processing of this block (see try_inline_full)
@@ -3667,7 +3683,7 @@
INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
}
// printing
- print_inlining(callee, "");
+ print_inlining(callee);
}
// NOTE: Bailouts from this point on, which occur at the
@@ -4133,8 +4149,19 @@
void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
+ CompileLog* log = compilation()->log();
+ if (log != NULL) {
+ if (success) {
+ if (msg != NULL)
+ log->inline_success(msg);
+ else
+ log->inline_success("receiver is statically known");
+ } else {
+ log->inline_fail(msg);
+ }
+ }
+
if (!PrintInlining) return;
- assert(msg != NULL, "must be");
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -31,6 +31,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciStreams.hpp"
+#include "compiler/compileLog.hpp"
class MemoryBuffer;
@@ -369,7 +370,7 @@
void append_unsafe_CAS(ciMethod* callee);
bool append_unsafe_get_and_set_obj(ciMethod* callee, bool is_add);
- void print_inlining(ciMethod* callee, const char* msg, bool success = true);
+ void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
void profile_invocation(ciMethod* inlinee, ValueStack* state);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1286,7 +1286,7 @@
if (x->needs_null_check()) {
info = state_for(x);
}
- __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ? T_OBJECT : T_ADDRESS), result, info);
+ __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
__ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -29,6 +29,7 @@
#include "c1/c1_ValueSet.hpp"
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
+#include "compiler/compileLog.hpp"
define_array(ValueSetArray, ValueSet*);
define_stack(ValueSetList, ValueSetArray);
@@ -54,7 +55,18 @@
// substituted some ifops/phis, so resolve the substitution
SubstitutionResolver sr(_hir);
}
+
+ CompileLog* log = _hir->compilation()->log();
+ if (log != NULL)
+ log->set_context("optimize name='cee'");
}
+
+ ~CE_Eliminator() {
+ CompileLog* log = _hir->compilation()->log();
+ if (log != NULL)
+ log->clear_context(); // skip marker if nothing was printed
+ }
+
int cee_count() const { return _cee_count; }
int ifop_count() const { return _ifop_count; }
@@ -306,6 +318,15 @@
, _merge_count(0)
{
_hir->iterate_preorder(this);
+ CompileLog* log = _hir->compilation()->log();
+ if (log != NULL)
+ log->set_context("optimize name='eliminate_blocks'");
+ }
+
+ ~BlockMerger() {
+ CompileLog* log = _hir->compilation()->log();
+ if (log != NULL)
+ log->clear_context(); // skip marker if nothing was printed
}
bool try_merge(BlockBegin* block) {
@@ -574,6 +595,15 @@
, _work_list(new BlockList()) {
_visitable_instructions = new ValueSet();
_visitor.set_eliminator(this);
+ CompileLog* log = _opt->ir()->compilation()->log();
+ if (log != NULL)
+ log->set_context("optimize name='null_check_elimination'");
+ }
+
+ ~NullCheckEliminator() {
+ CompileLog* log = _opt->ir()->compilation()->log();
+ if (log != NULL)
+ log->clear_context(); // skip marker if nothing was printed
}
Optimizer* opt() { return _opt; }
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1126,7 +1126,8 @@
if (all_tiers) {
log()->elem("method_not_compilable");
} else {
- log()->elem("method_not_compilable_at_tier");
+ log()->elem("method_not_compilable_at_tier level='%d'",
+ current()->task()->comp_level());
}
}
_compilable = new_compilable;
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -146,7 +146,7 @@
for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
BasicType t = (BasicType)i;
- if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) {
+ if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP && t != T_NARROWKLASS) {
ciType::_basic_types[t] = new (_arena) ciType(t);
init_ident_of(ciType::_basic_types[t]);
}
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -816,9 +816,6 @@
unresolved_klass, class_loader, protection_domain,
false, CHECK_NULL);
interf = KlassHandle(THREAD, k);
-
- if (LinkWellKnownClasses) // my super type is well known to me
- cp->klass_at_put(interface_index, interf()); // eagerly resolve
}
if (!Klass::cast(interf())->is_interface()) {
@@ -1008,40 +1005,42 @@
BAD_ALLOCATION_TYPE, // 1
BAD_ALLOCATION_TYPE, // 2
BAD_ALLOCATION_TYPE, // 3
- NONSTATIC_BYTE , // T_BOOLEAN = 4,
- NONSTATIC_SHORT, // T_CHAR = 5,
- NONSTATIC_WORD, // T_FLOAT = 6,
- NONSTATIC_DOUBLE, // T_DOUBLE = 7,
- NONSTATIC_BYTE, // T_BYTE = 8,
- NONSTATIC_SHORT, // T_SHORT = 9,
- NONSTATIC_WORD, // T_INT = 10,
- NONSTATIC_DOUBLE, // T_LONG = 11,
- NONSTATIC_OOP, // T_OBJECT = 12,
- NONSTATIC_OOP, // T_ARRAY = 13,
- BAD_ALLOCATION_TYPE, // T_VOID = 14,
- BAD_ALLOCATION_TYPE, // T_ADDRESS = 15,
- BAD_ALLOCATION_TYPE, // T_NARROWOOP= 16,
- BAD_ALLOCATION_TYPE, // T_METADATA = 17,
- BAD_ALLOCATION_TYPE, // T_CONFLICT = 18,
+ NONSTATIC_BYTE , // T_BOOLEAN = 4,
+ NONSTATIC_SHORT, // T_CHAR = 5,
+ NONSTATIC_WORD, // T_FLOAT = 6,
+ NONSTATIC_DOUBLE, // T_DOUBLE = 7,
+ NONSTATIC_BYTE, // T_BYTE = 8,
+ NONSTATIC_SHORT, // T_SHORT = 9,
+ NONSTATIC_WORD, // T_INT = 10,
+ NONSTATIC_DOUBLE, // T_LONG = 11,
+ NONSTATIC_OOP, // T_OBJECT = 12,
+ NONSTATIC_OOP, // T_ARRAY = 13,
+ BAD_ALLOCATION_TYPE, // T_VOID = 14,
+ BAD_ALLOCATION_TYPE, // T_ADDRESS = 15,
+ BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16,
+ BAD_ALLOCATION_TYPE, // T_METADATA = 17,
+ BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18,
+ BAD_ALLOCATION_TYPE, // T_CONFLICT = 19,
BAD_ALLOCATION_TYPE, // 0
BAD_ALLOCATION_TYPE, // 1
BAD_ALLOCATION_TYPE, // 2
BAD_ALLOCATION_TYPE, // 3
- STATIC_BYTE , // T_BOOLEAN = 4,
- STATIC_SHORT, // T_CHAR = 5,
- STATIC_WORD, // T_FLOAT = 6,
- STATIC_DOUBLE, // T_DOUBLE = 7,
- STATIC_BYTE, // T_BYTE = 8,
- STATIC_SHORT, // T_SHORT = 9,
- STATIC_WORD, // T_INT = 10,
- STATIC_DOUBLE, // T_LONG = 11,
- STATIC_OOP, // T_OBJECT = 12,
- STATIC_OOP, // T_ARRAY = 13,
- BAD_ALLOCATION_TYPE, // T_VOID = 14,
- BAD_ALLOCATION_TYPE, // T_ADDRESS = 15,
- BAD_ALLOCATION_TYPE, // T_NARROWOOP= 16,
- BAD_ALLOCATION_TYPE, // T_METADATA = 17,
- BAD_ALLOCATION_TYPE, // T_CONFLICT = 18,
+ STATIC_BYTE , // T_BOOLEAN = 4,
+ STATIC_SHORT, // T_CHAR = 5,
+ STATIC_WORD, // T_FLOAT = 6,
+ STATIC_DOUBLE, // T_DOUBLE = 7,
+ STATIC_BYTE, // T_BYTE = 8,
+ STATIC_SHORT, // T_SHORT = 9,
+ STATIC_WORD, // T_INT = 10,
+ STATIC_DOUBLE, // T_LONG = 11,
+ STATIC_OOP, // T_OBJECT = 12,
+ STATIC_OOP, // T_ARRAY = 13,
+ BAD_ALLOCATION_TYPE, // T_VOID = 14,
+ BAD_ALLOCATION_TYPE, // T_ADDRESS = 15,
+ BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16,
+ BAD_ALLOCATION_TYPE, // T_METADATA = 17,
+ BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18,
+ BAD_ALLOCATION_TYPE, // T_CONFLICT = 19,
};
static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) {
@@ -3192,8 +3191,6 @@
KlassHandle kh (THREAD, k);
super_klass = instanceKlassHandle(THREAD, kh());
- if (LinkWellKnownClasses) // my super class is well known to me
- cp->klass_at_put(super_class_index, super_klass()); // eagerly resolve
}
if (super_klass.not_null()) {
if (super_klass->is_interface()) {
@@ -3639,7 +3636,7 @@
// has to be changed accordingly.
this_klass->set_initial_method_idnum(methods->length());
this_klass->set_name(cp->klass_name_at(this_class_index));
- if (LinkWellKnownClasses || is_anonymous()) // I am well known to myself
+ if (is_anonymous()) // I am well known to myself
cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve
if (fields_annotations != NULL ||
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -344,7 +344,7 @@
// constant pool construction, but in later versions they can.
// %%% Let's phase out the old is_klass_reference.
bool is_klass_reference(constantPoolHandle cp, int index) {
- return ((LinkWellKnownClasses || EnableInvokeDynamic)
+ return (EnableInvokeDynamic
? cp->tag_at(index).is_klass_or_reference()
: cp->tag_at(index).is_klass_reference());
}
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -291,16 +291,6 @@
Handle protection_domain,
bool is_superclass,
TRAPS) {
-
- // Try to get one of the well-known klasses.
- // They are trusted, and do not participate in circularities.
- if (LinkWellKnownClasses) {
- Klass* k = find_well_known_klass(class_name);
- if (k != NULL) {
- return k;
- }
- }
-
// Double-check, if child class is already loaded, just return super-class,interface
// Don't add a placedholder if already loaded, i.e. already in system dictionary
// Make sure there's a placeholder for the *child* before resolving.
@@ -926,14 +916,6 @@
Klass* k = NULL;
assert(class_name != NULL, "class name must be non NULL");
- // Try to get one of the well-known klasses.
- if (LinkWellKnownClasses) {
- k = find_well_known_klass(class_name);
- if (k != NULL) {
- return k;
- }
- }
-
if (FieldType::is_array(class_name)) {
// The name refers to an array. Parse the name.
// dimension and object_key in FieldArrayInfo are assigned as a
@@ -954,38 +936,6 @@
return k;
}
-// Quick range check for names of well-known classes:
-static Symbol* wk_klass_name_limits[2] = {NULL, NULL};
-
-#ifndef PRODUCT
-static int find_wkk_calls, find_wkk_probes, find_wkk_wins;
-// counts for "hello world": 3983, 1616, 1075
-// => 60% hit after limit guard, 25% total win rate
-#endif
-
-Klass* SystemDictionary::find_well_known_klass(Symbol* class_name) {
- // A bounds-check on class_name will quickly get a negative result.
- NOT_PRODUCT(find_wkk_calls++);
- if (class_name >= wk_klass_name_limits[0] &&
- class_name <= wk_klass_name_limits[1]) {
- NOT_PRODUCT(find_wkk_probes++);
- vmSymbols::SID sid = vmSymbols::find_sid(class_name);
- if (sid != vmSymbols::NO_SID) {
- Klass* k = NULL;
- switch (sid) {
- #define WK_KLASS_CASE(name, symbol, ignore_option) \
- case vmSymbols::VM_SYMBOL_ENUM_NAME(symbol): \
- k = WK_KLASS(name); break;
- WK_KLASSES_DO(WK_KLASS_CASE)
- #undef WK_KLASS_CASE
- }
- NOT_PRODUCT(if (k != NULL) find_wkk_wins++);
- return k;
- }
- }
- return NULL;
-}
-
// Note: this method is much like resolve_from_stream, but
// updates no supplemental data structures.
// TODO consolidate the two methods with a helper routine?
@@ -1939,23 +1889,12 @@
int opt = (info & right_n_bits(CEIL_LG_OPTION_LIMIT));
initialize_wk_klass((WKID)id, opt, CHECK);
-
- // Update limits, so find_well_known_klass can be very fast:
- Symbol* s = vmSymbols::symbol_at((vmSymbols::SID)sid);
- if (wk_klass_name_limits[1] == NULL) {
- wk_klass_name_limits[0] = wk_klass_name_limits[1] = s;
- } else if (wk_klass_name_limits[1] < s) {
- wk_klass_name_limits[1] = s;
- } else if (wk_klass_name_limits[0] > s) {
- wk_klass_name_limits[0] = s;
- }
}
// move the starting value forward to the limit:
start_id = limit_id;
}
-
void SystemDictionary::initialize_preloaded_classes(TRAPS) {
assert(WK_KLASS(Object_klass) == NULL, "preloaded classes should only be initialized once");
// Preload commonly used klasses
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -92,93 +92,93 @@
// The order of these definitions is significant; it is the order in which
// preloading is actually performed by initialize_preloaded_classes.
-#define WK_KLASSES_DO(template) \
- /* well-known classes */ \
- template(Object_klass, java_lang_Object, Pre) \
- template(String_klass, java_lang_String, Pre) \
- template(Class_klass, java_lang_Class, Pre) \
- template(Cloneable_klass, java_lang_Cloneable, Pre) \
- template(ClassLoader_klass, java_lang_ClassLoader, Pre) \
- template(Serializable_klass, java_io_Serializable, Pre) \
- template(System_klass, java_lang_System, Pre) \
- template(Throwable_klass, java_lang_Throwable, Pre) \
- template(Error_klass, java_lang_Error, Pre) \
- template(ThreadDeath_klass, java_lang_ThreadDeath, Pre) \
- template(Exception_klass, java_lang_Exception, Pre) \
- template(RuntimeException_klass, java_lang_RuntimeException, Pre) \
- template(ProtectionDomain_klass, java_security_ProtectionDomain, Pre) \
- template(AccessControlContext_klass, java_security_AccessControlContext, Pre) \
- template(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
- template(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \
- template(LinkageError_klass, java_lang_LinkageError, Pre) \
- template(ClassCastException_klass, java_lang_ClassCastException, Pre) \
- template(ArrayStoreException_klass, java_lang_ArrayStoreException, Pre) \
- template(VirtualMachineError_klass, java_lang_VirtualMachineError, Pre) \
- template(OutOfMemoryError_klass, java_lang_OutOfMemoryError, Pre) \
- template(StackOverflowError_klass, java_lang_StackOverflowError, Pre) \
- template(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre) \
- template(Reference_klass, java_lang_ref_Reference, Pre) \
- \
- /* Preload ref klasses and set reference types */ \
- template(SoftReference_klass, java_lang_ref_SoftReference, Pre) \
- template(WeakReference_klass, java_lang_ref_WeakReference, Pre) \
- template(FinalReference_klass, java_lang_ref_FinalReference, Pre) \
- template(PhantomReference_klass, java_lang_ref_PhantomReference, Pre) \
- template(Finalizer_klass, java_lang_ref_Finalizer, Pre) \
- \
- template(Thread_klass, java_lang_Thread, Pre) \
- template(ThreadGroup_klass, java_lang_ThreadGroup, Pre) \
- template(Properties_klass, java_util_Properties, Pre) \
- template(reflect_AccessibleObject_klass, java_lang_reflect_AccessibleObject, Pre) \
- template(reflect_Field_klass, java_lang_reflect_Field, Pre) \
- template(reflect_Method_klass, java_lang_reflect_Method, Pre) \
- template(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre) \
- \
+#define WK_KLASSES_DO(do_klass) \
+ /* well-known classes */ \
+ do_klass(Object_klass, java_lang_Object, Pre ) \
+ do_klass(String_klass, java_lang_String, Pre ) \
+ do_klass(Class_klass, java_lang_Class, Pre ) \
+ do_klass(Cloneable_klass, java_lang_Cloneable, Pre ) \
+ do_klass(ClassLoader_klass, java_lang_ClassLoader, Pre ) \
+ do_klass(Serializable_klass, java_io_Serializable, Pre ) \
+ do_klass(System_klass, java_lang_System, Pre ) \
+ do_klass(Throwable_klass, java_lang_Throwable, Pre ) \
+ do_klass(Error_klass, java_lang_Error, Pre ) \
+ do_klass(ThreadDeath_klass, java_lang_ThreadDeath, Pre ) \
+ do_klass(Exception_klass, java_lang_Exception, Pre ) \
+ do_klass(RuntimeException_klass, java_lang_RuntimeException, Pre ) \
+ do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
+ do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
+ do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
+ do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \
+ do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \
+ do_klass(ClassCastException_klass, java_lang_ClassCastException, Pre ) \
+ do_klass(ArrayStoreException_klass, java_lang_ArrayStoreException, Pre ) \
+ do_klass(VirtualMachineError_klass, java_lang_VirtualMachineError, Pre ) \
+ do_klass(OutOfMemoryError_klass, java_lang_OutOfMemoryError, Pre ) \
+ do_klass(StackOverflowError_klass, java_lang_StackOverflowError, Pre ) \
+ do_klass(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre ) \
+ do_klass(Reference_klass, java_lang_ref_Reference, Pre ) \
+ \
+ /* Preload ref klasses and set reference types */ \
+ do_klass(SoftReference_klass, java_lang_ref_SoftReference, Pre ) \
+ do_klass(WeakReference_klass, java_lang_ref_WeakReference, Pre ) \
+ do_klass(FinalReference_klass, java_lang_ref_FinalReference, Pre ) \
+ do_klass(PhantomReference_klass, java_lang_ref_PhantomReference, Pre ) \
+ do_klass(Finalizer_klass, java_lang_ref_Finalizer, Pre ) \
+ \
+ do_klass(Thread_klass, java_lang_Thread, Pre ) \
+ do_klass(ThreadGroup_klass, java_lang_ThreadGroup, Pre ) \
+ do_klass(Properties_klass, java_util_Properties, Pre ) \
+ do_klass(reflect_AccessibleObject_klass, java_lang_reflect_AccessibleObject, Pre ) \
+ do_klass(reflect_Field_klass, java_lang_reflect_Field, Pre ) \
+ do_klass(reflect_Method_klass, java_lang_reflect_Method, Pre ) \
+ do_klass(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre ) \
+ \
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
- template(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt) \
- template(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt) \
- template(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \
- template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
- \
- /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
- template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \
- template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \
- template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \
- template(LambdaForm_klass, java_lang_invoke_LambdaForm, Opt) \
- template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \
- template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \
- template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
- template(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292) \
- template(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292) \
- template(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292) \
- /* Note: MethodHandle must be first, and VolatileCallSite last in group */ \
- \
- template(StringBuffer_klass, java_lang_StringBuffer, Pre) \
- template(StringBuilder_klass, java_lang_StringBuilder, Pre) \
- \
- /* It's NULL in non-1.4 JDKs. */ \
- template(StackTraceElement_klass, java_lang_StackTraceElement, Opt) \
- /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
- /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
- template(nio_Buffer_klass, java_nio_Buffer, Opt) \
- \
- template(DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
- \
- template(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \
- \
- /* Preload boxing klasses */ \
- template(Boolean_klass, java_lang_Boolean, Pre) \
- template(Character_klass, java_lang_Character, Pre) \
- template(Float_klass, java_lang_Float, Pre) \
- template(Double_klass, java_lang_Double, Pre) \
- template(Byte_klass, java_lang_Byte, Pre) \
- template(Short_klass, java_lang_Short, Pre) \
- template(Integer_klass, java_lang_Integer, Pre) \
- template(Long_klass, java_lang_Long, Pre) \
+ do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
+ do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
+ do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
+ do_klass(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt ) \
+ do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15 ) \
+ do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15 ) \
+ \
+ /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
+ do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
+ do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \
+ do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \
+ do_klass(LambdaForm_klass, java_lang_invoke_LambdaForm, Opt ) \
+ do_klass(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292 ) \
+ do_klass(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292 ) \
+ do_klass(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292 ) \
+ do_klass(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292 ) \
+ do_klass(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292 ) \
+ do_klass(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292 ) \
+ /* Note: MethodHandle must be first, and VolatileCallSite last in group */ \
+ \
+ do_klass(StringBuffer_klass, java_lang_StringBuffer, Pre ) \
+ do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \
+ \
+ /* It's NULL in non-1.4 JDKs. */ \
+ do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
+ /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
+ /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
+ do_klass(nio_Buffer_klass, java_nio_Buffer, Opt ) \
+ \
+ do_klass(DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel ) \
+ \
+ do_klass(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt ) \
+ \
+ /* Preload boxing klasses */ \
+ do_klass(Boolean_klass, java_lang_Boolean, Pre ) \
+ do_klass(Character_klass, java_lang_Character, Pre ) \
+ do_klass(Float_klass, java_lang_Float, Pre ) \
+ do_klass(Double_klass, java_lang_Double, Pre ) \
+ do_klass(Byte_klass, java_lang_Byte, Pre ) \
+ do_klass(Short_klass, java_lang_Short, Pre ) \
+ do_klass(Integer_klass, java_lang_Integer, Pre ) \
+ do_klass(Long_klass, java_lang_Long, Pre ) \
/*end*/
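The do_klass() list above is consumed as an X-macro. A minimal sketch of the pattern, with illustrative names (MY_KLASSES_DO, WKID, MK_ENUM) rather than the VM's own:

#define MY_KLASSES_DO(do_klass)                    \
  do_klass(Object_klass, java_lang_Object, Pre)    \
  do_klass(String_klass, java_lang_String, Pre)

enum WKID {
#define MK_ENUM(name, symbol, option) name##_id,
  MY_KLASSES_DO(MK_ENUM)
#undef MK_ENUM
  wk_klass_count
};

The same list can be expanded a second time with a macro that routes each entry through the per-option check_klass_* helpers shown in the next hunk.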
@@ -280,9 +280,6 @@
Handle protection_domain,
TRAPS);
- // If the given name is known to vmSymbols, return the well-know klass:
- static Klass* find_well_known_klass(Symbol* class_name);
-
// Lookup an instance or array class that has already been loaded
// either into the given class loader, or else into another class
// loader that is constrained (via loader constraints) to produce
@@ -392,9 +389,9 @@
return k;
}
- static Klass* check_klass_Pre(Klass* k) { return check_klass(k); }
+ static Klass* check_klass_Pre( Klass* k) { return check_klass(k); }
static Klass* check_klass_Pre_JSR292(Klass* k) { return EnableInvokeDynamic ? check_klass(k) : k; }
- static Klass* check_klass_Opt(Klass* k) { return k; }
+ static Klass* check_klass_Opt( Klass* k) { return k; }
static Klass* check_klass_Opt_Kernel(Klass* k) { return k; } //== Opt
static Klass* check_klass_Opt_Only_JDK15(Klass* k) {
assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -324,24 +324,6 @@
return vmIntrinsics::_none;
}
-Method* vmIntrinsics::method_for(vmIntrinsics::ID id) {
- if (id == _none) return NULL;
- Symbol* cname = vmSymbols::symbol_at(class_for(id));
- Symbol* mname = vmSymbols::symbol_at(name_for(id));
- Symbol* msig = vmSymbols::symbol_at(signature_for(id));
- if (cname == NULL || mname == NULL || msig == NULL) return NULL;
- Klass* k = SystemDictionary::find_well_known_klass(cname);
- if (k == NULL) return NULL;
- Method* m = InstanceKlass::cast(k)->find_method(mname, msig);
- if (m == NULL &&
- cname == vmSymbols::java_lang_invoke_MethodHandle() &&
- msig == vmSymbols::star_name()) {
- // Any signature polymorphic method is represented by a fixed concrete signature:
- m = InstanceKlass::cast(k)->find_method(mname, vmSymbols::object_array_object_signature());
- }
- return m;
-}
-
#define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
static const char* vm_intrinsic_name_bodies =
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -1138,9 +1138,6 @@
static const char* short_name_as_C_string(ID id, char* buf, int size);
- // Access to intrinsic methods:
- static Method* method_for(ID id);
-
// Wrapper object methods:
static ID for_boxing(BasicType type);
static ID for_unboxing(BasicType type);
--- a/hotspot/src/share/vm/code/dependencies.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/code/dependencies.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -333,12 +333,14 @@
for (int j = 0; j < stride; j++) {
if (j == skipj) continue;
ciBaseObject* v = deps->at(i+j);
+ int idx;
if (v->is_object()) {
- bytes.write_int(_oop_recorder->find_index(v->as_object()->constant_encoding()));
+ idx = _oop_recorder->find_index(v->as_object()->constant_encoding());
} else {
ciMetadata* meta = v->as_metadata();
- bytes.write_int(_oop_recorder->find_index(meta->constant_encoding()));
+ idx = _oop_recorder->find_index(meta->constant_encoding());
}
+ bytes.write_int(idx);
}
}
}
@@ -573,8 +575,8 @@
if (type() == call_site_target_value) {
args[j] = argument_oop(j);
} else {
- args[j] = argument(j);
- }
+ args[j] = argument(j);
+ }
}
if (_deps != NULL && _deps->log() != NULL) {
Dependencies::write_dependency_to(_deps->log(),
@@ -665,6 +667,14 @@
Metadata* Dependencies::DepStream::argument(int i) {
Metadata* result = recorded_metadata_at(argument_index(i));
+
+ if (result == NULL) { // Explicit context argument can be compressed
+ int ctxkj = dep_context_arg(type()); // -1 if no explicit context arg
+ if (ctxkj >= 0 && i == ctxkj && ctxkj+1 < argument_count()) {
+ result = ctxk_encoded_as_null(type(), argument(ctxkj+1));
+ }
+ }
+
assert(result == NULL || result->is_klass() || result->is_method(), "must be");
return result;
}
@@ -680,25 +690,21 @@
// Most dependencies have an explicit context type argument.
{
- int ctxkj = dep_context_arg(_type); // -1 if no explicit context arg
+ int ctxkj = dep_context_arg(type()); // -1 if no explicit context arg
if (ctxkj >= 0) {
Metadata* k = argument(ctxkj);
- if (k != NULL) { // context type was not compressed away
- assert(k->is_klass(), "type check");
- return (Klass*) k;
- }
- // recompute "default" context type
- return ctxk_encoded_as_null(_type, argument(ctxkj+1));
+ assert(k != NULL && k->is_klass(), "type check");
+ return (Klass*)k;
}
}
// Some dependencies are using the klass of the first object
// argument as implicit context type (e.g. call_site_target_value).
{
- int ctxkj = dep_implicit_context_arg(_type);
+ int ctxkj = dep_implicit_context_arg(type());
if (ctxkj >= 0) {
Klass* k = argument_oop(ctxkj)->klass();
- assert(k->is_klass(), "type check");
+ assert(k != NULL && k->is_klass(), "type check");
return (Klass*) k;
}
}
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1570,7 +1570,8 @@
}
CompileLog* log = thread->log();
if (log != NULL) {
- log->begin_elem("start_compile_thread thread='" UINTX_FORMAT "' process='%d'",
+ log->begin_elem("start_compile_thread name='%s' thread='" UINTX_FORMAT "' process='%d'",
+ thread->name(),
os::current_thread_id(),
os::current_process_id());
log->stamp();
--- a/hotspot/src/share/vm/compiler/compileLog.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/compiler/compileLog.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -302,3 +302,48 @@
char buf[4 * K];
finish_log_on_error(file, buf, sizeof(buf));
}
+
+// ------------------------------------------------------------------
+// CompileLog::inline_success
+//
+// Print information about successful method inlining.
+void CompileLog::inline_success(const char* reason) {
+ begin_elem("inline_success reason='");
+ text(reason);
+ end_elem("'");
+}
+
+// ------------------------------------------------------------------
+// CompileLog::inline_fail
+//
+// Print information about failed method inlining.
+void CompileLog::inline_fail(const char* reason) {
+ begin_elem("inline_fail reason='");
+ text(reason);
+ end_elem("'");
+}
+
+// ------------------------------------------------------------------
+// CompileLog::set_context
+//
+// Set an XML tag as an optional context marker - it is printed only if
+// other log entries follow it before the context is reset.
+void CompileLog::set_context(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ clear_context();
+ _context.print("<");
+ _context.vprint(format, ap);
+ _context.print_cr("/>");
+ va_end(ap);
+}
+
+// ------------------------------------------------------------------
+// CompileLog::code_cache_state
+//
+// Print code cache state.
+void CompileLog::code_cache_state() {
+ begin_elem("code_cache");
+ CodeCache::log_state(this);
+ end_elem("");
+}
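A hypothetical usage sketch of the context marker added above; only set_context(), clear_context() and inline_fail() come from this change, the surrounding function and XML attribute are made up for illustration:

static void log_parse_step(CompileLog* log, int bci, const char* failure) {
  if (log == NULL)  return;
  // Mark where parsing is; the marker is only emitted if an entry follows.
  log->set_context("bc bci='%d'", bci);
  if (failure != NULL) {
    log->inline_fail(failure);   // printed with the context marker before it
  }
  log->clear_context();          // an unused marker is dropped silently
}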
--- a/hotspot/src/share/vm/compiler/compileLog.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/compiler/compileLog.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -62,7 +62,13 @@
intx thread_id() { return _thread_id; }
const char* file() { return _file; }
+
+ // Optional context marker, to help place actions that occur during
+ // parsing. If there is no log output before the next context string
+ // or reset, the context string is silently dropped.
stringStream* context() { return &_context; }
+ void clear_context() { context()->reset(); }
+ void set_context(const char* format, ...);
void name(ciSymbol* s); // name='s'
void name(Symbol* s) { xmlStream::name(s); }
@@ -71,6 +77,9 @@
int identify(ciBaseObject* obj);
void clear_identities();
+ void inline_fail (const char* reason);
+ void inline_success(const char* reason);
+
// virtuals
virtual void see_tag(const char* tag, bool push);
virtual void pop_tag(const char* tag);
@@ -78,6 +87,9 @@
// make a provisional end of log mark
void mark_file_end() { _file_end = out()->count(); }
+ // Print code cache statistics
+ void code_cache_state();
+
// copy all logs to the given stream
static void finish_log(outputStream* out);
static void finish_log_on_error(outputStream* out, char *buf, int buflen);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -222,7 +222,7 @@
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
- assert(UseCompressedOops ||
+ assert(UseCompressedKlassPointers ||
junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
--- a/hotspot/src/share/vm/memory/metaspace.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -135,6 +135,8 @@
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
+ static bool is_initialized() { return _class_space_list != NULL; }
+
#ifndef PRODUCT
bool contains(const void *ptr) const;
bool contains_class(const void *ptr) const;
--- a/hotspot/src/share/vm/memory/universe.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -151,7 +151,9 @@
CollectedHeap* Universe::_collectedHeap = NULL;
-NarrowOopStruct Universe::_narrow_oop = { NULL, 0, true };
+NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
+NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
+address Universe::_narrow_ptrs_base;
void Universe::basic_type_classes_do(void f(Klass*)) {
@@ -807,7 +809,7 @@
}
if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
// Can't reserve heap below 32Gb.
- Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
+ // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
if (verbose) {
tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
@@ -838,8 +840,16 @@
tty->cr();
tty->cr();
}
+ if (UseCompressedKlassPointers) {
+ Universe::set_narrow_klass_base(Universe::narrow_oop_base());
+ Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
+ }
+ Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
}
- assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
+ // Universe::narrow_oop_base() is one page below the metaspace
+ // base. The actual metaspace base depends on alignment constraints,
+ // so we don't know its exact location here.
+ assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
Universe::narrow_oop_base() == NULL, "invalid value");
assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
Universe::narrow_oop_shift() == 0, "invalid value");
@@ -861,7 +871,10 @@
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
// Add in the class metaspace area so the classes in the headers can
// be compressed the same as instances.
- size_t total_reserved = align_size_up(heap_size + ClassMetaspaceSize, alignment);
+ // Need to round class space size up because it's below the heap and
+ // the actual alignment depends on its size.
+ size_t metaspace_size = align_size_up(ClassMetaspaceSize, alignment);
+ size_t total_reserved = align_size_up(heap_size + metaspace_size, alignment);
char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
@@ -895,11 +908,23 @@
return total_rs;
}
- // Split the reserved space into main Java heap and a space for classes
- // so that they can be compressed using the same algorithm as compressed oops
- ReservedSpace heap_rs = total_rs.first_part(heap_size);
- ReservedSpace class_rs = total_rs.last_part(heap_size, alignment);
+ // Split the reserved space into main Java heap and a space for
+ // classes so that they can be compressed using the same algorithm
+ // as compressed oops. If compressed oops and compressed klass ptrs are
+ // used, we need the metaspace first: if the alignment used for
+ // compressed oops is greater than the one used for compressed klass
+ // ptrs, a metadata space on top of the heap could become
+ // unreachable.
+ ReservedSpace class_rs = total_rs.first_part(metaspace_size);
+ ReservedSpace heap_rs = total_rs.last_part(metaspace_size, alignment);
Metaspace::initialize_class_space(class_rs);
+
+ if (UseCompressedOops) {
+ // Universe::initialize_heap() will reset this to NULL if unscaled
+ // or zero-based narrow oops are actually used.
+ address base = (address)(total_rs.base() - os::vm_page_size());
+ Universe::set_narrow_oop_base(base);
+ }
return heap_rs;
}
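A worked example of the layout reserve_heap() now produces (addresses and sizes are illustrative only):

  total_rs.base()                        start of class metaspace (class_rs, metaspace_size bytes)
  total_rs.base() + metaspace_size       start of Java heap (heap_rs)
  total_rs.base() - os::vm_page_size()   narrow_oop_base, when UseCompressedOops

Putting the class space below the heap keeps it reachable from the same narrow-pointer base even when the heap alignment is coarser than the klass alignment, which is the unreachability problem described in the comment above.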
--- a/hotspot/src/share/vm/memory/universe.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -105,16 +105,16 @@
Method* get_Method();
};
-// For UseCompressedOops.
-struct NarrowOopStruct {
- // Base address for oop-within-java-object materialization.
- // NULL if using wide oops or zero based narrow oops.
+// For UseCompressedOops and UseCompressedKlassPointers.
+struct NarrowPtrStruct {
+ // Base address for oop/klass-within-java-object materialization.
+ // NULL if using wide oops/klasses or zero based narrow oops/klasses.
address _base;
- // Number of shift bits for encoding/decoding narrow oops.
- // 0 if using wide oops or zero based unscaled narrow oops,
- // LogMinObjAlignmentInBytes otherwise.
+ // Number of shift bits for encoding/decoding narrow ptrs.
+ // 0 if using wide ptrs or zero based unscaled narrow ptrs,
+ // LogMinObjAlignmentInBytes/LogKlassAlignmentInBytes otherwise.
int _shift;
- // Generate code with implicit null checks for narrow oops.
+ // Generate code with implicit null checks for narrow ptrs.
bool _use_implicit_null_checks;
};
@@ -206,7 +206,10 @@
static CollectedHeap* _collectedHeap;
// For UseCompressedOops.
- static struct NarrowOopStruct _narrow_oop;
+ static struct NarrowPtrStruct _narrow_oop;
+ // For UseCompressedKlassPointers.
+ static struct NarrowPtrStruct _narrow_klass;
+ static address _narrow_ptrs_base;
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
@@ -259,8 +262,21 @@
HeapBasedNarrowOop = 2
};
static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
- static void set_narrow_oop_base(address base) { _narrow_oop._base = base; }
- static void set_narrow_oop_use_implicit_null_checks(bool use) { _narrow_oop._use_implicit_null_checks = use; }
+ static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
+ static void set_narrow_oop_base(address base) {
+ assert(UseCompressedOops, "no compressed oops?");
+ _narrow_oop._base = base;
+ }
+ static void set_narrow_klass_base(address base) {
+ assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+ _narrow_klass._base = base;
+ }
+ static void set_narrow_oop_use_implicit_null_checks(bool use) {
+ assert(UseCompressedOops, "no compressed ptrs?");
+ _narrow_oop._use_implicit_null_checks = use;
+ }
+ static bool reserve_metaspace_helper(bool with_base = false);
+ static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous);
// Debugging
static int _verify_count; // number of verifies done
@@ -354,14 +370,30 @@
static CollectedHeap* heap() { return _collectedHeap; }
// For UseCompressedOops
- static address* narrow_oop_base_addr() { return &_narrow_oop._base; }
- static address narrow_oop_base() { return _narrow_oop._base; }
- static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
- static int narrow_oop_shift() { return _narrow_oop._shift; }
- static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
+ static address narrow_oop_base() { return _narrow_oop._base; }
+ static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
+ static int narrow_oop_shift() { return _narrow_oop._shift; }
+ static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
+
+ // For UseCompressedKlassPointers
+ static address narrow_klass_base() { return _narrow_klass._base; }
+ static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); }
+ static int narrow_klass_shift() { return _narrow_klass._shift; }
+ static bool narrow_klass_use_implicit_null_checks() { return _narrow_klass._use_implicit_null_checks; }
+
+ static address* narrow_ptrs_base_addr() { return &_narrow_ptrs_base; }
+ static void set_narrow_ptrs_base(address a) { _narrow_ptrs_base = a; }
+ static address narrow_ptrs_base() { return _narrow_ptrs_base; }
// this is set in vm_version on sparc (and then reset in universe afaict)
- static void set_narrow_oop_shift(int shift) { _narrow_oop._shift = shift; }
+ static void set_narrow_oop_shift(int shift) {
+ _narrow_oop._shift = shift;
+ }
+
+ static void set_narrow_klass_shift(int shift) {
+ assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
+ _narrow_klass._shift = shift;
+ }
// Reserve Java heap and determine CompressedOops mode
static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
--- a/hotspot/src/share/vm/oops/instanceOop.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/oops/instanceOop.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -37,7 +37,9 @@
// If compressed, the offset of the fields of the instance may not be aligned.
static int base_offset_in_bytes() {
- return UseCompressedKlassPointers ?
+ // The offset computation code breaks if only UseCompressedKlassPointers
+ // is true (without UseCompressedOops).
+ return (UseCompressedOops && UseCompressedKlassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
}
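For illustration, on a 64-bit VM the two branches above correspond to the 12/16 byte header sizes also quoted in library_call.cpp later in this change:

  mark word       bytes  0..7
  narrow klass    bytes  8..11    (UseCompressedOops && UseCompressedKlassPointers)
  first field     byte  12        -> klass_gap_offset_in_bytes()
  otherwise:
  klass pointer   bytes  8..15
  first field     byte  16        -> sizeof(instanceOopDesc)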
--- a/hotspot/src/share/vm/oops/method.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -712,7 +712,8 @@
}
if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
ttyLocker ttyl;
- xtty->begin_elem("make_not_%scompilable thread='%d'", is_osr ? "osr_" : "", (int) os::current_thread_id());
+ xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'",
+ is_osr ? "osr_" : "", os::current_thread_id());
xtty->method(this);
xtty->stamp();
xtty->end_elem();
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -185,8 +185,8 @@
inline bool check_obj_alignment(oop obj) {
return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}
-inline bool check_obj_alignment(Klass* obj) {
- return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+inline bool check_klass_alignment(Klass* obj) {
+ return (intptr_t)obj % KlassAlignmentInBytes == 0;
}
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
@@ -228,9 +228,9 @@
inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
assert(!is_null(v), "oop value can never be zero");
- assert(check_obj_alignment(v), "Address not aligned");
- address base = Universe::narrow_oop_base();
- int shift = Universe::narrow_oop_shift();
+ assert(check_klass_alignment(v), "Address not aligned");
+ address base = Universe::narrow_klass_base();
+ int shift = Universe::narrow_klass_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
@@ -245,10 +245,10 @@
inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
- address base = Universe::narrow_oop_base();
- int shift = Universe::narrow_oop_shift();
+ address base = Universe::narrow_klass_base();
+ int shift = Universe::narrow_klass_shift();
Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
- assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
+ assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
return result;
}
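The encode/decode arithmetic above is the usual base-plus-shifted-offset scheme. A minimal standalone sketch, with illustrative constants and function names (not the VM's):

#include <cassert>
#include <cstdint>

static const uint64_t kNarrowKlassBase  = 0x0000000700000000ULL; // assumed base
static const int      kNarrowKlassShift = 3;  // e.g. LogKlassAlignmentInBytes

uint32_t encode_klass(uint64_t klass_addr) {
  // Only klass-aligned addresses are encodable without losing low bits.
  assert(klass_addr % (1u << kNarrowKlassShift) == 0);
  return (uint32_t)((klass_addr - kNarrowKlassBase) >> kNarrowKlassShift);
}

uint64_t decode_klass(uint32_t narrow_klass) {
  return kNarrowKlassBase + ((uint64_t)narrow_klass << kNarrowKlassShift);
}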
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -439,9 +439,7 @@
WarmCallInfo wci = *(initial_wci);
failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
if (failure_msg != NULL && C->log() != NULL) {
- C->log()->begin_elem("inline_fail reason='");
- C->log()->text("%s", failure_msg);
- C->log()->end_elem("'");
+ C->log()->inline_fail(failure_msg);
}
#ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/cfgnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1386,7 +1386,7 @@
Node *n = phi->in(i);
if( !n ) return NULL;
if( phase->type(n) == Type::TOP ) return NULL;
- if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN )
+ if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
break;
}
if( i >= phi->req() ) // Only split for constants
@@ -1875,17 +1875,19 @@
}
#ifdef _LP64
- // Push DecodeN down through phi.
+ // Push DecodeN/DecodeNKlass down through phi.
// The rest of phi graph will transform by split EncodeP node though phis up.
- if (UseCompressedOops && can_reshape && progress == NULL) {
+ if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
bool may_push = true;
bool has_decodeN = false;
+ bool is_decodeN = false;
for (uint i=1; i<req(); ++i) {// For all paths in
Node *ii = in(i);
- if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
+ if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
// Do optimization if a non dead path exist.
if (ii->in(1)->bottom_type() != Type::TOP) {
has_decodeN = true;
+ is_decodeN = ii->is_DecodeN();
}
} else if (!ii->is_Phi()) {
may_push = false;
@@ -1895,13 +1897,18 @@
if (has_decodeN && may_push) {
PhaseIterGVN *igvn = phase->is_IterGVN();
// Make narrow type for new phi.
- const Type* narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
+ const Type* narrow_t;
+ if (is_decodeN) {
+ narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
+ } else {
+ narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr());
+ }
PhiNode* new_phi = new (phase->C) PhiNode(r, narrow_t);
uint orig_cnt = req();
for (uint i=1; i<req(); ++i) {// For all paths in
Node *ii = in(i);
Node* new_ii = NULL;
- if (ii->is_DecodeN()) {
+ if (ii->is_DecodeNarrowPtr()) {
assert(ii->bottom_type() == bottom_type(), "sanity");
new_ii = ii->in(1);
} else {
@@ -1909,14 +1916,22 @@
if (ii->as_Phi() == this) {
new_ii = new_phi;
} else {
- new_ii = new (phase->C) EncodePNode(ii, narrow_t);
+ if (is_decodeN) {
+ new_ii = new (phase->C) EncodePNode(ii, narrow_t);
+ } else {
+ new_ii = new (phase->C) EncodePKlassNode(ii, narrow_t);
+ }
igvn->register_new_node_with_optimizer(new_ii);
}
}
new_phi->set_req(i, new_ii);
}
igvn->register_new_node_with_optimizer(new_phi, this);
- progress = new (phase->C) DecodeNNode(new_phi, bottom_type());
+ if (is_decodeN) {
+ progress = new (phase->C) DecodeNNode(new_phi, bottom_type());
+ } else {
+ progress = new (phase->C) DecodeNKlassNode(new_phi, bottom_type());
+ }
}
}
#endif
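In Ideal-graph terms the transform above rewrites, for example (sketch):

  Phi(DecodeN(x), y)        ==>  DecodeN(Phi(x, EncodeP(y)))
  Phi(DecodeNKlass(x), y)   ==>  DecodeNKlass(Phi(x, EncodePKlass(y)))

so the phi itself carries the narrow type and the decode is pushed below it.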
--- a/hotspot/src/share/vm/opto/classes.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -91,6 +91,7 @@
macro(GetAndSetN)
macro(Con)
macro(ConN)
+macro(ConNKlass)
macro(ConD)
macro(ConF)
macro(ConI)
@@ -118,6 +119,7 @@
macro(CountTrailingZerosL)
macro(CreateEx)
macro(DecodeN)
+macro(DecodeNKlass)
macro(DivD)
macro(DivF)
macro(DivI)
@@ -126,6 +128,7 @@
macro(DivModI)
macro(DivModL)
macro(EncodeP)
+macro(EncodePKlass)
macro(ExpD)
macro(FastLock)
macro(FastUnlock)
@@ -147,7 +150,6 @@
macro(LoadD_unaligned)
macro(LoadF)
macro(LoadI)
-macro(LoadUI2L)
macro(LoadKlass)
macro(LoadNKlass)
macro(LoadL)
@@ -233,6 +235,7 @@
macro(StoreL)
macro(StoreP)
macro(StoreN)
+macro(StoreNKlass)
macro(StrComp)
macro(StrEquals)
macro(StrIndexOf)
--- a/hotspot/src/share/vm/opto/compile.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -828,6 +828,9 @@
has_unsafe_access(),
SharedRuntime::is_wide_vector(max_vector_size())
);
+
+ if (log() != NULL) // Print code cache state into compiler log
+ log()->code_cache_state();
}
}
@@ -2236,6 +2239,7 @@
nop != Op_CreateEx &&
nop != Op_CheckCastPP &&
nop != Op_DecodeN &&
+ nop != Op_DecodeNKlass &&
!n->is_Mem() ) {
Node *x = n->clone();
call->set_req( TypeFunc::Parms, x );
@@ -2284,11 +2288,11 @@
case Op_GetAndSetN:
case Op_StoreP:
case Op_StoreN:
+ case Op_StoreNKlass:
case Op_LoadB:
case Op_LoadUB:
case Op_LoadUS:
case Op_LoadI:
- case Op_LoadUI2L:
case Op_LoadKlass:
case Op_LoadNKlass:
case Op_LoadL:
@@ -2318,7 +2322,7 @@
addp->in(AddPNode::Base) == n->in(AddPNode::Base),
"Base pointers must match" );
#ifdef _LP64
- if (UseCompressedOops &&
+ if ((UseCompressedOops || UseCompressedKlassPointers) &&
addp->Opcode() == Op_ConP &&
addp == n->in(AddPNode::Base) &&
n->in(AddPNode::Offset)->is_Con()) {
@@ -2327,16 +2331,18 @@
// instructions (4) then load 64-bits constant (7).
// Do this transformation here since IGVN will convert ConN back to ConP.
const Type* t = addp->bottom_type();
- if (t->isa_oopptr()) {
+ if (t->isa_oopptr() || t->isa_klassptr()) {
Node* nn = NULL;
+ int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
+
// Look for existing ConN node of the same exact type.
Compile* C = Compile::current();
Node* r = C->root();
uint cnt = r->outcnt();
for (uint i = 0; i < cnt; i++) {
Node* m = r->raw_out(i);
- if (m!= NULL && m->Opcode() == Op_ConN &&
+ if (m!= NULL && m->Opcode() == op &&
m->bottom_type()->make_ptr() == t) {
nn = m;
break;
@@ -2345,7 +2351,11 @@
if (nn != NULL) {
// Decode a narrow oop to match address
// [R12 + narrow_oop_reg<<3 + offset]
- nn = new (C) DecodeNNode(nn, t);
+ if (t->isa_oopptr()) {
+ nn = new (C) DecodeNNode(nn, t);
+ } else {
+ nn = new (C) DecodeNKlassNode(nn, t);
+ }
n->set_req(AddPNode::Base, nn);
n->set_req(AddPNode::Address, nn);
if (addp->outcnt() == 0) {
@@ -2400,22 +2410,24 @@
case Op_CmpP:
// Do this transformation here to preserve CmpPNode::sub() and
// other TypePtr related Ideal optimizations (for example, ptr nullness).
- if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) {
+ if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
Node* in1 = n->in(1);
Node* in2 = n->in(2);
- if (!in1->is_DecodeN()) {
+ if (!in1->is_DecodeNarrowPtr()) {
in2 = in1;
in1 = n->in(2);
}
- assert(in1->is_DecodeN(), "sanity");
+ assert(in1->is_DecodeNarrowPtr(), "sanity");
Compile* C = Compile::current();
Node* new_in2 = NULL;
- if (in2->is_DecodeN()) {
+ if (in2->is_DecodeNarrowPtr()) {
+ assert(in2->Opcode() == in1->Opcode(), "must be same node type");
new_in2 = in2->in(1);
} else if (in2->Opcode() == Op_ConP) {
const Type* t = in2->bottom_type();
if (t == TypePtr::NULL_PTR) {
+ assert(in1->is_DecodeN(), "compare klass to null?");
// Don't convert CmpP null check into CmpN if compressed
// oops implicit null check is not generated.
// This will allow to generate normal oop implicit null check.
@@ -2460,6 +2472,8 @@
//
} else if (t->isa_oopptr()) {
new_in2 = ConNode::make(C, t->make_narrowoop());
+ } else if (t->isa_klassptr()) {
+ new_in2 = ConNode::make(C, t->make_narrowklass());
}
}
if (new_in2 != NULL) {
@@ -2476,23 +2490,28 @@
break;
case Op_DecodeN:
- assert(!n->in(1)->is_EncodeP(), "should be optimized out");
+ case Op_DecodeNKlass:
+ assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
// DecodeN could be pinned when it can't be fold into
// an address expression, see the code for Op_CastPP above.
- assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control");
+ assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
break;
- case Op_EncodeP: {
+ case Op_EncodeP:
+ case Op_EncodePKlass: {
Node* in1 = n->in(1);
- if (in1->is_DecodeN()) {
+ if (in1->is_DecodeNarrowPtr()) {
n->subsume_by(in1->in(1));
} else if (in1->Opcode() == Op_ConP) {
Compile* C = Compile::current();
const Type* t = in1->bottom_type();
if (t == TypePtr::NULL_PTR) {
+ assert(t->isa_oopptr(), "null klass?");
n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR));
} else if (t->isa_oopptr()) {
n->subsume_by(ConNode::make(C, t->make_narrowoop()));
+ } else if (t->isa_klassptr()) {
+ n->subsume_by(ConNode::make(C, t->make_narrowklass()));
}
}
if (in1->outcnt() == 0) {
@@ -2526,7 +2545,7 @@
}
case Op_Phi:
- if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
+ if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
// The EncodeP optimization may create Phi with the same edges
// for all paths. It is not handled well by Register Allocator.
Node* unique_in = n->in(1);
@@ -2689,12 +2708,13 @@
}
// Skip next transformation if compressed oops are not used.
- if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks())
+ if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
+ (!UseCompressedOops && !UseCompressedKlassPointers))
return;
- // Go over safepoints nodes to skip DecodeN nodes for debug edges.
+ // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
// It could be done for an uncommon traps or any safepoints/calls
- // if the DecodeN node is referenced only in a debug info.
+ // if the DecodeN/DecodeNKlass node is referenced only in a debug info.
while (sfpt.size() > 0) {
n = sfpt.pop();
JVMState *jvms = n->as_SafePoint()->jvms();
@@ -2705,7 +2725,7 @@
n->as_CallStaticJava()->uncommon_trap_request() != 0);
for (int j = start; j < end; j++) {
Node* in = n->in(j);
- if (in->is_DecodeN()) {
+ if (in->is_DecodeNarrowPtr()) {
bool safe_to_skip = true;
if (!is_uncommon ) {
// Is it safe to skip?
--- a/hotspot/src/share/vm/opto/connode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/connode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -45,16 +45,17 @@
//------------------------------make-------------------------------------------
ConNode *ConNode::make( Compile* C, const Type *t ) {
switch( t->basic_type() ) {
- case T_INT: return new (C) ConINode( t->is_int() );
- case T_LONG: return new (C) ConLNode( t->is_long() );
- case T_FLOAT: return new (C) ConFNode( t->is_float_constant() );
- case T_DOUBLE: return new (C) ConDNode( t->is_double_constant() );
- case T_VOID: return new (C) ConNode ( Type::TOP );
- case T_OBJECT: return new (C) ConPNode( t->is_ptr() );
- case T_ARRAY: return new (C) ConPNode( t->is_aryptr() );
- case T_ADDRESS: return new (C) ConPNode( t->is_ptr() );
- case T_NARROWOOP: return new (C) ConNNode( t->is_narrowoop() );
- case T_METADATA: return new (C) ConPNode( t->is_ptr() );
+ case T_INT: return new (C) ConINode( t->is_int() );
+ case T_LONG: return new (C) ConLNode( t->is_long() );
+ case T_FLOAT: return new (C) ConFNode( t->is_float_constant() );
+ case T_DOUBLE: return new (C) ConDNode( t->is_double_constant() );
+ case T_VOID: return new (C) ConNode ( Type::TOP );
+ case T_OBJECT: return new (C) ConPNode( t->is_ptr() );
+ case T_ARRAY: return new (C) ConPNode( t->is_aryptr() );
+ case T_ADDRESS: return new (C) ConPNode( t->is_ptr() );
+ case T_NARROWOOP: return new (C) ConNNode( t->is_narrowoop() );
+ case T_NARROWKLASS: return new (C) ConNKlassNode( t->is_narrowklass() );
+ case T_METADATA: return new (C) ConPNode( t->is_ptr() );
// Expected cases: TypePtr::NULL_PTR, any is_rawptr()
// Also seen: AnyPtr(TopPTR *+top); from command line:
// r -XX:+PrintOpto -XX:CIStart=285 -XX:+CompileTheWorld -XX:CompileTheWorldStartAt=660
@@ -447,7 +448,7 @@
// If not converting int->oop, throw away cast after constant propagation
Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
const Type *t = ccp->type(in(1));
- if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks())) {
+ if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) {
return NULL; // do not transform raw pointers or narrow oops
}
return ConstraintCastNode::Ideal_DU_postCCP(ccp);
@@ -607,15 +608,56 @@
if (t == Type::TOP) return Type::TOP;
if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR;
- assert(t->isa_oop_ptr() || UseCompressedKlassPointers && t->isa_klassptr(), "only oopptr here");
+ assert(t->isa_oop_ptr(), "only oopptr here");
return t->make_narrowoop();
}
-Node *EncodePNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+Node *EncodeNarrowPtrNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
}
+Node* DecodeNKlassNode::Identity(PhaseTransform* phase) {
+ const Type *t = phase->type( in(1) );
+ if( t == Type::TOP ) return in(1);
+
+ if (in(1)->is_EncodePKlass()) {
+ // (DecodeNKlass (EncodePKlass p)) -> p
+ return in(1)->in(1);
+ }
+ return this;
+}
+
+const Type *DecodeNKlassNode::Value( PhaseTransform *phase ) const {
+ const Type *t = phase->type( in(1) );
+ if (t == Type::TOP) return Type::TOP;
+ assert(t != TypeNarrowKlass::NULL_PTR, "null klass?");
+
+ assert(t->isa_narrowklass(), "only narrow klass ptr here");
+ return t->make_ptr();
+}
+
+Node* EncodePKlassNode::Identity(PhaseTransform* phase) {
+ const Type *t = phase->type( in(1) );
+ if( t == Type::TOP ) return in(1);
+
+ if (in(1)->is_DecodeNKlass()) {
+ // (EncodePKlass (DecodeNKlass p)) -> p
+ return in(1)->in(1);
+ }
+ return this;
+}
+
+const Type *EncodePKlassNode::Value( PhaseTransform *phase ) const {
+ const Type *t = phase->type( in(1) );
+ if (t == Type::TOP) return Type::TOP;
+ assert (t != TypePtr::NULL_PTR, "null klass?");
+
+ assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+ return t->make_narrowklass();
+}
+
+
//=============================================================================
//------------------------------Identity---------------------------------------
Node *Conv2BNode::Identity( PhaseTransform *phase ) {
--- a/hotspot/src/share/vm/opto/connode.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/connode.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -88,6 +88,14 @@
virtual int Opcode() const;
};
+//------------------------------ConNKlassNode---------------------------------
+// Simple narrow klass constants
+class ConNKlassNode : public ConNode {
+public:
+ ConNKlassNode( const TypeNarrowKlass *t ) : ConNode(t) {}
+ virtual int Opcode() const;
+};
+
//------------------------------ConLNode---------------------------------------
// Simple long constants
@@ -270,42 +278,91 @@
};
+//------------------------------EncodeNarrowPtr--------------------------------
+class EncodeNarrowPtrNode : public TypeNode {
+ protected:
+ EncodeNarrowPtrNode(Node* value, const Type* type):
+ TypeNode(type, 2) {
+ init_class_id(Class_EncodeNarrowPtr);
+ init_req(0, NULL);
+ init_req(1, value);
+ }
+ public:
+ virtual uint ideal_reg() const { return Op_RegN; }
+ virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
+};
+
//------------------------------EncodeP--------------------------------
// Encodes an oop pointers into its compressed form
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
-class EncodePNode : public TypeNode {
+class EncodePNode : public EncodeNarrowPtrNode {
public:
EncodePNode(Node* value, const Type* type):
- TypeNode(type, 2) {
+ EncodeNarrowPtrNode(value, type) {
init_class_id(Class_EncodeP);
- init_req(0, NULL);
- init_req(1, value);
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
- virtual uint ideal_reg() const { return Op_RegN; }
+};
- virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
+//------------------------------EncodePKlass--------------------------------
+// Encodes a klass pointer into its compressed form
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class EncodePKlassNode : public EncodeNarrowPtrNode {
+ public:
+ EncodePKlassNode(Node* value, const Type* type):
+ EncodeNarrowPtrNode(value, type) {
+ init_class_id(Class_EncodePKlass);
+ }
+ virtual int Opcode() const;
+ virtual Node *Identity( PhaseTransform *phase );
+ virtual const Type *Value( PhaseTransform *phase ) const;
+};
+
+//------------------------------DecodeNarrowPtr--------------------------------
+class DecodeNarrowPtrNode : public TypeNode {
+ protected:
+ DecodeNarrowPtrNode(Node* value, const Type* type):
+ TypeNode(type, 2) {
+ init_class_id(Class_DecodeNarrowPtr);
+ init_req(0, NULL);
+ init_req(1, value);
+ }
+ public:
+ virtual uint ideal_reg() const { return Op_RegP; }
};
//------------------------------DecodeN--------------------------------
// Converts a narrow oop into a real oop ptr.
// Takes an extra argument which is the real heap base as a long which
// may be useful for code generation in the backend.
-class DecodeNNode : public TypeNode {
+class DecodeNNode : public DecodeNarrowPtrNode {
public:
DecodeNNode(Node* value, const Type* type):
- TypeNode(type, 2) {
+ DecodeNarrowPtrNode(value, type) {
init_class_id(Class_DecodeN);
- init_req(0, NULL);
- init_req(1, value);
}
virtual int Opcode() const;
- virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
- virtual uint ideal_reg() const { return Op_RegP; }
+ virtual Node *Identity( PhaseTransform *phase );
+};
+
+//------------------------------DecodeNKlass--------------------------------
+// Converts a narrow klass pointer into a real klass ptr.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class DecodeNKlassNode : public DecodeNarrowPtrNode {
+ public:
+ DecodeNKlassNode(Node* value, const Type* type):
+ DecodeNarrowPtrNode(value, type) {
+ init_class_id(Class_DecodeNKlass);
+ }
+ virtual int Opcode() const;
+ virtual const Type *Value( PhaseTransform *phase ) const;
+ virtual Node *Identity( PhaseTransform *phase );
};
//------------------------------Conv2BNode-------------------------------------
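Taken together, the declarations above give the following hierarchy (sketch, matching the init_class_id() calls):

  TypeNode
    EncodeNarrowPtrNode      ideal_reg() == Op_RegN, shared Ideal_DU_postCCP
      EncodePNode            narrow oops
      EncodePKlassNode       narrow klass pointers
    DecodeNarrowPtrNode      ideal_reg() == Op_RegP
      DecodeNNode            narrow oops
      DecodeNKlassNode       narrow klass pointers

Callers elsewhere in the change (compile.cpp, escape.cpp, loopopts.cpp, matcher.cpp) can then use is_EncodeNarrowPtr()/is_DecodeNarrowPtr() to match either variant with a single check.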
--- a/hotspot/src/share/vm/opto/escape.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -368,7 +368,9 @@
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
- case Op_DecodeN: {
+ case Op_DecodeN:
+ case Op_EncodePKlass:
+ case Op_DecodeNKlass: {
add_local_var_and_edge(n, PointsToNode::NoEscape,
n->in(1), delayed_worklist);
break;
@@ -381,7 +383,8 @@
break;
}
case Op_ConP:
- case Op_ConN: {
+ case Op_ConN:
+ case Op_ConNKlass: {
// assume all oop constants globally escape except for null
PointsToNode::EscapeState es;
if (igvn->type(n) == TypePtr::NULL_PTR ||
@@ -458,6 +461,7 @@
}
case Op_StoreP:
case Op_StoreN:
+ case Op_StoreNKlass:
case Op_StorePConditional:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
@@ -465,7 +469,7 @@
const Type *adr_type = igvn->type(adr);
adr_type = adr_type->make_ptr();
if (adr_type->isa_oopptr() ||
- (opcode == Op_StoreP || opcode == Op_StoreN) &&
+ (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
(adr_type == TypeRawPtr::NOTNULL &&
adr->in(AddPNode::Address)->is_Proj() &&
adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
@@ -572,7 +576,9 @@
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
- case Op_DecodeN: {
+ case Op_DecodeN:
+ case Op_EncodePKlass:
+ case Op_DecodeNKlass: {
add_local_var_and_edge(n, PointsToNode::NoEscape,
n->in(1), NULL);
break;
@@ -646,6 +652,7 @@
}
case Op_StoreP:
case Op_StoreN:
+ case Op_StoreNKlass:
case Op_StorePConditional:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
@@ -661,7 +668,7 @@
const Type *adr_type = _igvn->type(adr);
adr_type = adr_type->make_ptr();
if (adr_type->isa_oopptr() ||
- (opcode == Op_StoreP || opcode == Op_StoreN) &&
+ (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
(adr_type == TypeRawPtr::NOTNULL &&
adr->in(AddPNode::Address)->is_Proj() &&
adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
@@ -2088,7 +2095,7 @@
Node* uncast_base = base->uncast();
int opcode = uncast_base->Opcode();
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
- opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
+ opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
(uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
(uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
}
@@ -2837,8 +2844,8 @@
alloc_worklist.append_if_missing(use);
} else if (use->is_Phi() ||
use->is_CheckCastPP() ||
- use->is_EncodeP() ||
- use->is_DecodeN() ||
+ use->is_EncodeNarrowPtr() ||
+ use->is_DecodeNarrowPtr() ||
(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
alloc_worklist.append_if_missing(use);
#ifdef ASSERT
--- a/hotspot/src/share/vm/opto/graphKit.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1115,7 +1115,7 @@
// short-circuit a common case
jint offset_con = find_int_con(offset, Type::OffsetBot);
if (offset_con != Type::OffsetBot) {
- return longcon((long) offset_con);
+ return longcon((jlong) offset_con);
}
return _gvn.transform( new (C) ConvI2LNode(offset));
}
--- a/hotspot/src/share/vm/opto/lcm.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -164,6 +164,7 @@
case Op_StoreL:
case Op_StoreP:
case Op_StoreN:
+ case Op_StoreNKlass:
was_store = true; // Memory op is a store op
// Stores will have their address in slot 2 (memory in slot 1).
// If the value being nul-checked is in another slot, it means we
--- a/hotspot/src/share/vm/opto/library_call.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -2378,13 +2378,15 @@
}
}
- if (sharpened_klass != NULL) {
+ // The sharpened class might be unloaded if there is no class loader
+ // constraint in place.
+ if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
#ifndef PRODUCT
if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
- tty->print(" from base type: "); adr_type->dump();
- tty->print(" sharpened value: "); tjp->dump();
+ tty->print(" from base type: "); adr_type->dump();
+ tty->print(" sharpened value: "); tjp->dump();
}
#endif
// Sharpen the value type.
@@ -4381,7 +4383,7 @@
// 12 - 64-bit VM, compressed klass
// 16 - 64-bit VM, normal klass
if (base_off % BytesPerLong != 0) {
- assert(UseCompressedOops, "");
+ assert(UseCompressedKlassPointers, "");
if (is_array) {
// Exclude length to copy by 8 bytes words.
base_off += sizeof(int);
--- a/hotspot/src/share/vm/opto/live.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -331,6 +331,7 @@
#ifdef _LP64
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
+ UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
#endif
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
--- a/hotspot/src/share/vm/opto/loopTransform.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -92,10 +92,10 @@
limit_n != NULL && limit_n->is_Con()) {
// Use longs to avoid integer overflow.
int stride_con = cl->stride_con();
- long init_con = cl->init_trip()->get_int();
- long limit_con = cl->limit()->get_int();
+ jlong init_con = cl->init_trip()->get_int();
+ jlong limit_con = cl->limit()->get_int();
int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
- long trip_count = (limit_con - init_con + stride_m)/stride_con;
+ jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
// Set exact trip count.
cl->set_exact_trip_count((uint)trip_count);
@@ -1212,16 +1212,16 @@
} else if (loop_head->has_exact_trip_count() && init->is_Con()) {
// Loop's limit is constant. Loop's init could be constant when pre-loop
// become peeled iteration.
- long init_con = init->get_int();
+ jlong init_con = init->get_int();
// We can keep old loop limit if iterations count stays the same:
// old_trip_count == new_trip_count * 2
// Note: since old_trip_count >= 2 then new_trip_count >= 1
// so we also don't need to adjust zero trip test.
- long limit_con = limit->get_int();
+ jlong limit_con = limit->get_int();
// (stride_con*2) not overflow since stride_con <= 8.
int new_stride_con = stride_con * 2;
int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1);
- long trip_count = (limit_con - init_con + stride_m)/new_stride_con;
+ jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
// New trip count should satisfy next conditions.
assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
uint new_trip_count = (uint)trip_count;
@@ -2413,7 +2413,7 @@
break;
}
int opc = n->Opcode();
- if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
+ if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
msg = "oop fills not handled";
break;
}
--- a/hotspot/src/share/vm/opto/loopnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -328,12 +328,12 @@
const TypeInt* limit_t = gvn->type(limit)->is_int();
if (stride_con > 0) {
- long init_p = (long)init_t->_lo + stride_con;
- if (init_p > (long)max_jint || init_p > (long)limit_t->_hi)
+ jlong init_p = (jlong)init_t->_lo + stride_con;
+ if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi)
return false; // cyclic loop or this loop trips only once
} else {
- long init_p = (long)init_t->_hi + stride_con;
- if (init_p < (long)min_jint || init_p < (long)limit_t->_lo)
+ jlong init_p = (jlong)init_t->_hi + stride_con;
+ if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo)
return false; // cyclic loop or this loop trips only once
}
@@ -716,16 +716,16 @@
#endif
if (cl->has_exact_trip_count()) {
// Simple case: loop has constant boundaries.
- // Use longs to avoid integer overflow.
+ // Use jlongs to avoid integer overflow.
int stride_con = cl->stride_con();
- long init_con = cl->init_trip()->get_int();
- long limit_con = cl->limit()->get_int();
+ jlong init_con = cl->init_trip()->get_int();
+ jlong limit_con = cl->limit()->get_int();
julong trip_cnt = cl->trip_count();
- long final_con = init_con + trip_cnt*stride_con;
+ jlong final_con = init_con + trip_cnt*stride_con;
int final_int = (int)final_con;
// The final value should be in integer range since the loop
// is counted and the limit was checked for overflow.
- assert(final_con == (long)final_int, "final value should be integer");
+ assert(final_con == (jlong)final_int, "final value should be integer");
limit = _igvn.intcon(final_int);
} else {
// Create new LoopLimit node to get exact limit (final iv value).
@@ -790,16 +790,16 @@
return NULL; // Identity
if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
- // Use longs to avoid integer overflow.
- long init_con = init_t->is_int()->get_con();
- long limit_con = limit_t->is_int()->get_con();
+ // Use jlongs to avoid integer overflow.
+ jlong init_con = init_t->is_int()->get_con();
+ jlong limit_con = limit_t->is_int()->get_con();
int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
- long trip_count = (limit_con - init_con + stride_m)/stride_con;
- long final_con = init_con + stride_con*trip_count;
+ jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
+ jlong final_con = init_con + stride_con*trip_count;
int final_int = (int)final_con;
// The final value should be in integer range since the loop
// is counted and the limit was checked for overflow.
- assert(final_con == (long)final_int, "final value should be integer");
+ assert(final_con == (jlong)final_int, "final value should be integer");
return TypeInt::make(final_int);
}
@@ -829,7 +829,7 @@
const TypeInt* init_t = phase->type(in(Init) )->is_int();
const TypeInt* limit_t = phase->type(in(Limit))->is_int();
int stride_p;
- long lim, ini;
+ jlong lim, ini;
julong max;
if (stride_con > 0) {
stride_p = stride_con;
--- a/hotspot/src/share/vm/opto/loopopts.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopopts.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -550,7 +550,7 @@
// This will likely Split-If, a higher-payoff operation.
for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
Node* use = phi->fast_out(k);
- if (use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP())
+ if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
cost += ConditionalMoveLimit;
// Is there a use inside the loop?
// Note: check only basic types since CMoveP is pinned.
@@ -1006,7 +1006,7 @@
// to fold a StoreP and an AddP together (as part of an
// address expression) and the AddP and StoreP have
// different controls.
- if( !x->is_Load() && !x->is_DecodeN() ) _igvn._worklist.yank(x);
+ if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
}
_igvn.remove_dead_node(n);
}
--- a/hotspot/src/share/vm/opto/machnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/machnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -265,7 +265,8 @@
// See if it adds up to a base + offset.
if (index != NULL) {
const Type* t_index = index->bottom_type();
- if (t_index->isa_narrowoop()) { // EncodeN, LoadN, LoadConN, LoadNKlass.
+ if (t_index->isa_narrowoop() || t_index->isa_narrowklass()) { // EncodeN, LoadN, LoadConN, LoadNKlass,
+ // EncodePKlass, LoadConNKlass.
// Memory references through narrow oops have a
// funny base so grab the type from the index:
// [R12 + narrow_oop_reg<<3 + offset]
@@ -352,6 +353,10 @@
// 32-bit unscaled narrow oop can be the base of any address expression
t = t->make_ptr();
}
+ if (UseCompressedKlassPointers && Universe::narrow_klass_shift() == 0) {
+ // 32-bit unscaled narrow klass can be the base of any address expression
+ t = t->make_ptr();
+ }
if (t->isa_intptr_t() && offset != 0 && offset != Type::OffsetBot) {
// We cannot assert that the offset does not look oop-ish here.
// Depending on the heap layout the cardmark base could land
--- a/hotspot/src/share/vm/opto/macro.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -2125,7 +2125,7 @@
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
#ifdef _LP64
- if (UseCompressedOops && klass_node->is_DecodeN()) {
+ if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
klass_node->in(1)->init_req(0, ctrl);
} else
--- a/hotspot/src/share/vm/opto/matcher.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1058,7 +1058,7 @@
Node *m = n->in(i); // Get input
int op = m->Opcode();
assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
- if( op == Op_ConI || op == Op_ConP || op == Op_ConN ||
+ if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
op == Op_ConF || op == Op_ConD || op == Op_ConL
// || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
) {
@@ -1450,7 +1450,8 @@
if (j == max_scan) // No post-domination before scan end?
return true; // Then break the match tree up
}
- if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {
+ if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
+ (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
// These are commonly used in address expressions and can
// efficiently fold into them on X64 in some cases.
return false;
@@ -1574,14 +1575,14 @@
// program. The register allocator is free to split uses later to
// split live ranges.
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
- if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL;
+ if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
// See if this Con has already been reduced using this rule.
if (_shared_nodes.Size() <= leaf->_idx) return NULL;
MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
if (last != NULL && rule == last->rule()) {
// Don't expect control change for DecodeN
- if (leaf->is_DecodeN())
+ if (leaf->is_DecodeNarrowPtr())
return last;
// Get the new space root.
Node* xroot = new_node(C->root());
@@ -1671,12 +1672,12 @@
// DecodeN node consumed by an address may have different type
// then its input. Don't compare types for such case.
if (m->adr_type() != mach_at &&
- (m->in(MemNode::Address)->is_DecodeN() ||
+ (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
m->in(MemNode::Address)->is_AddP() &&
- m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN() ||
+ m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr() ||
m->in(MemNode::Address)->is_AddP() &&
m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
- m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeN())) {
+ m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr())) {
mach_at = m->adr_type();
}
if (m->adr_type() != mach_at) {
@@ -1721,7 +1722,7 @@
guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
}
- if (leaf->is_Con() || leaf->is_DecodeN()) {
+ if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
// Record the con for sharing
_shared_nodes.map(leaf->_idx, ex);
}
@@ -2038,7 +2039,7 @@
continue; // for(int i = ...)
}
- if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
+ if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
// Bases used in addresses must be shared but since
// they are shared through a DecodeN they may appear
// to have a single use so force sharing here.
@@ -2277,7 +2278,7 @@
if (has_new_node(val)) {
Node* new_val = new_node(val);
if (is_decoden) {
- assert(val->is_DecodeN() && val->in(0) == NULL, "sanity");
+ assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
// Note: new_val may have a control edge if
// the original ideal node DecodeN was matched before
// it was unpinned in Matcher::collect_null_checks().
--- a/hotspot/src/share/vm/opto/matcher.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/matcher.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -380,6 +380,7 @@
static const bool clone_shift_expressions;
static bool narrow_oop_use_complex_address();
+ static bool narrow_klass_use_complex_address();
// Generate implicit null check for narrow oops if it can fold
// into address expression (x64).
--- a/hotspot/src/share/vm/opto/memnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -714,10 +714,12 @@
continue;
case Op_DecodeN: // No change to NULL-ness, so peek thru
+ case Op_DecodeNKlass:
adr = adr->in(1);
continue;
case Op_EncodeP:
+ case Op_EncodePKlass:
// EncodeP node's control edge could be set by this method
// when EncodeP node depends on CastPP node.
//
@@ -794,6 +796,7 @@
case Op_LoadNKlass: // Loading from within a klass
case Op_ConP: // Loading from a klass
case Op_ConN: // Loading from a klass
+ case Op_ConNKlass: // Loading from a klass
case Op_CreateEx: // Sucking up the guts of an exception oop
case Op_Con: // Reading from TLS
case Op_CMoveP: // CMoveP is pinned
@@ -900,7 +903,7 @@
} else
#endif
{
- assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop");
+ assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
}
}
@@ -1894,13 +1897,13 @@
const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
- if (adr_type->is_ptr_to_narrowoop()) {
+ if (adr_type->is_ptr_to_narrowklass()) {
assert(UseCompressedKlassPointers, "no compressed klasses");
- Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowoop()));
- return new (C) DecodeNNode(load_klass, load_klass->bottom_type()->make_ptr());
+ Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
+ return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
}
#endif
- assert(!adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
+ assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
}
@@ -2110,7 +2113,7 @@
if (t == Type::TOP)
return t;
- return t->make_narrowoop();
+ return t->make_narrowklass();
}
//------------------------------Identity---------------------------------------
@@ -2121,9 +2124,10 @@
const Type *t = phase->type( x );
if( t == Type::TOP ) return x;
- if( t->isa_narrowoop()) return x;
-
- return phase->transform(new (phase->C) EncodePNode(x, t->make_narrowoop()));
+ if( t->isa_narrowklass()) return x;
+ assert (!t->isa_narrowoop(), "no narrow oop here");
+
+ return phase->transform(new (phase->C) EncodePKlassNode(x, t->make_narrowklass()));
}
//------------------------------Value-----------------------------------------
@@ -2228,12 +2232,15 @@
case T_ADDRESS:
case T_OBJECT:
#ifdef _LP64
- if (adr->bottom_type()->is_ptr_to_narrowoop() ||
- (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
- adr->bottom_type()->isa_rawptr())) {
+ if (adr->bottom_type()->is_ptr_to_narrowoop()) {
val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
- } else
+ } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
+ (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+ adr->bottom_type()->isa_rawptr())) {
+ val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
+ return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
+ }
#endif
{
return new (C) StorePNode(ctl, mem, adr, adr_type, val);
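The StoreNode::make() hunk above now distinguishes three pointer cases: an address typed as ptr-to-narrowoop gets EncodeP + StoreN, an address typed as ptr-to-narrowklass (or a klass value stored through a raw pointer while UseCompressedKlassPointers is on) gets the new EncodePKlass + StoreNKlass pair, and everything else falls through to StorePNode. A minimal standalone sketch of that selection, with hypothetical enum names standing in for the real type queries and node constructors:

#include <cassert>

// Hypothetical stand-ins for the HotSpot type queries; only the shape of the
// decision is mirrored here, not the real node construction.
enum class Slot      { Oop, NarrowOop, NarrowKlass };
enum class StoreKind { StoreP, StoreN, StoreNKlass };

StoreKind select_store(Slot adr_kind) {
  switch (adr_kind) {
    case Slot::NarrowOop:   return StoreKind::StoreN;      // EncodeP      + StoreN
    case Slot::NarrowKlass: return StoreKind::StoreNKlass; // EncodePKlass + StoreNKlass
    default:                return StoreKind::StoreP;      // full-width pointer store
  }
}

int main() {
  assert(select_store(Slot::NarrowKlass) == StoreKind::StoreNKlass);
  assert(select_store(Slot::Oop)         == StoreKind::StoreP);
  return 0;
}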
--- a/hotspot/src/share/vm/opto/memnode.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -274,18 +274,6 @@
virtual BasicType memory_type() const { return T_INT; }
};
-//------------------------------LoadUI2LNode-----------------------------------
-// Load an unsigned integer into long from memory
-class LoadUI2LNode : public LoadNode {
-public:
- LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
- : LoadNode(c, mem, adr, at, t) {}
- virtual int Opcode() const;
- virtual uint ideal_reg() const { return Op_RegL; }
- virtual int store_Opcode() const { return Op_StoreL; }
- virtual BasicType memory_type() const { return T_LONG; }
-};
-
//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
@@ -437,12 +425,12 @@
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
- LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
+ LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
: LoadNNode(c,mem,adr,at,tk) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegN; }
- virtual int store_Opcode() const { return Op_StoreN; }
- virtual BasicType memory_type() const { return T_NARROWOOP; }
+ virtual int store_Opcode() const { return Op_StoreNKlass; }
+ virtual BasicType memory_type() const { return T_NARROWKLASS; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
@@ -593,6 +581,15 @@
virtual BasicType memory_type() const { return T_NARROWOOP; }
};
+//------------------------------StoreNKlassNode--------------------------------------
+// Store narrow klass to memory
+class StoreNKlassNode : public StoreNNode {
+public:
+ StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {}
+ virtual int Opcode() const;
+ virtual BasicType memory_type() const { return T_NARROWKLASS; }
+};
+
//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
--- a/hotspot/src/share/vm/opto/mulnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/mulnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -599,20 +599,6 @@
Node* in1 = in(1);
uint op = in1->Opcode();
- // Masking sign bits off of an integer? Do an unsigned integer to
- // long load.
- // NOTE: This check must be *before* we try to convert the AndLNode
- // to an AndINode and commute it with ConvI2LNode because
- // 0xFFFFFFFFL masks the whole integer and we get a sign extension,
- // which is wrong.
- if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == CONST64(0x00000000FFFFFFFF)) {
- Node* load = in1->in(1);
- return new (phase->C) LoadUI2LNode(load->in(MemNode::Control),
- load->in(MemNode::Memory),
- load->in(MemNode::Address),
- load->adr_type());
- }
-
// Are we masking a long that was converted from an int with a mask
// that fits in 32-bits? Commute them and use an AndINode. Don't
// convert masks which would cause a sign extension of the integer
--- a/hotspot/src/share/vm/opto/node.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -62,8 +62,12 @@
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
+class DecodeNarrowPtrNode;
class DecodeNNode;
+class DecodeNKlassNode;
+class EncodeNarrowPtrNode;
class EncodePNode;
+class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
@@ -585,8 +589,12 @@
DEFINE_CLASS_ID(CheckCastPP, Type, 2)
DEFINE_CLASS_ID(CMove, Type, 3)
DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
- DEFINE_CLASS_ID(DecodeN, Type, 5)
- DEFINE_CLASS_ID(EncodeP, Type, 6)
+ DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
+ DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
+ DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
+ DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
+ DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
+ DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
DEFINE_CLASS_ID(Proj, Node, 3)
DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -706,8 +714,12 @@
DEFINE_CLASS_QUERY(Cmp)
DEFINE_CLASS_QUERY(CountedLoop)
DEFINE_CLASS_QUERY(CountedLoopEnd)
+ DEFINE_CLASS_QUERY(DecodeNarrowPtr)
DEFINE_CLASS_QUERY(DecodeN)
+ DEFINE_CLASS_QUERY(DecodeNKlass)
+ DEFINE_CLASS_QUERY(EncodeNarrowPtr)
DEFINE_CLASS_QUERY(EncodeP)
+ DEFINE_CLASS_QUERY(EncodePKlass)
DEFINE_CLASS_QUERY(FastLock)
DEFINE_CLASS_QUERY(FastUnlock)
DEFINE_CLASS_QUERY(If)
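The node.hpp hunk above makes DecodeNarrowPtr and EncodeNarrowPtr the shared parents of the oop and klass variants, so matcher code can ask is_DecodeNarrowPtr() once instead of testing both opcodes. A simplified, standalone mimic of the idea (hypothetical id values, not the real DEFINE_CLASS_ID bit layout): the parent's class id is a prefix shared by its children, and the parent query is a masked compare that matches every child.

#include <cassert>

enum : unsigned {
  Class_DecodeNarrowPtr     = 0x05,        // shared prefix
  Class_DecodeN             = 0x05 | 0x10, // child variant 1
  Class_DecodeNKlass        = 0x05 | 0x20, // child variant 2
  ClassMask_DecodeNarrowPtr = 0x0F         // mask off the child bits
};

struct Node {
  unsigned _class_id;
  bool is_DecodeNarrowPtr() const {
    return (_class_id & ClassMask_DecodeNarrowPtr) == Class_DecodeNarrowPtr;
  }
  bool is_DecodeN()      const { return _class_id == Class_DecodeN; }
  bool is_DecodeNKlass() const { return _class_id == Class_DecodeNKlass; }
};

int main() {
  Node n{Class_DecodeNKlass};
  assert(n.is_DecodeNarrowPtr());           // parent query covers both children
  assert(n.is_DecodeNKlass() && !n.is_DecodeN());
  return 0;
}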
--- a/hotspot/src/share/vm/opto/parse1.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/parse1.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1381,8 +1381,7 @@
// that occur during parsing of this BC. If there is no log
// output until the next context string, this context string
// will be silently ignored.
- log->context()->reset();
- log->context()->print_cr("<bc code='%d' bci='%d'/>", (int)bc(), bci());
+ log->set_context("bc code='%d' bci='%d'", (int)bc(), bci());
}
if (block()->has_trap_at(bci())) {
@@ -1411,7 +1410,8 @@
NOT_PRODUCT( parse_histogram()->record_change(); );
- if (log != NULL) log->context()->reset(); // done w/ this one
+ if (log != NULL)
+ log->clear_context(); // skip marker if nothing was printed
// Fall into next bytecode. Each bytecode normally has 1 sequential
// successor which is typically made ready by visiting this bytecode.
--- a/hotspot/src/share/vm/opto/parse2.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1239,7 +1239,7 @@
static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
Node* ldk;
- if (n->is_DecodeN()) {
+ if (n->is_DecodeNKlass()) {
if (n->in(1)->Opcode() != Op_LoadNKlass) {
return NULL;
} else {
--- a/hotspot/src/share/vm/opto/phaseX.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -497,8 +497,8 @@
#ifndef PRODUCT
protected:
// Sub-quadratic implementation of VerifyIterativeGVN.
- unsigned long _verify_counter;
- unsigned long _verify_full_passes;
+ julong _verify_counter;
+ julong _verify_full_passes;
enum { _verify_window_size = 30 };
Node* _verify_window[_verify_window_size];
void verify_step(Node* n);
--- a/hotspot/src/share/vm/opto/subnode.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -789,7 +789,7 @@
// Now check for LoadKlass on left.
Node* ldk1 = in(1);
- if (ldk1->is_DecodeN()) {
+ if (ldk1->is_DecodeNKlass()) {
ldk1 = ldk1->in(1);
if (ldk1->Opcode() != Op_LoadNKlass )
return NULL;
@@ -814,7 +814,7 @@
// Check for a LoadKlass from primary supertype array.
// Any nested loadklass from loadklass+con must be from the p.s. array.
- if (ldk2->is_DecodeN()) {
+ if (ldk2->is_DecodeNKlass()) {
// Keep ldk2 as DecodeN since it could be used in CmpP below.
if (ldk2->in(1)->Opcode() != Op_LoadNKlass )
return NULL;
--- a/hotspot/src/share/vm/opto/superword.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -179,7 +179,6 @@
for (int i = 0; i < _block.length(); i++) {
Node* n = _block.at(i);
if (n->is_Mem() && !n->is_LoadStore() && in_bb(n) &&
- n->Opcode() != Op_LoadUI2L &&
is_java_primitive(n->as_Mem()->memory_type())) {
int align = memory_alignment(n->as_Mem(), 0);
if (align != bottom_align) {
--- a/hotspot/src/share/vm/opto/type.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -57,6 +57,7 @@
{ Bad, T_LONG, "long:", false, Op_RegL, relocInfo::none }, // Long
{ Half, T_VOID, "half", false, 0, relocInfo::none }, // Half
{ Bad, T_NARROWOOP, "narrowoop:", false, Op_RegN, relocInfo::none }, // NarrowOop
+ { Bad, T_NARROWKLASS,"narrowklass:", false, Op_RegN, relocInfo::none }, // NarrowKlass
{ Bad, T_ILLEGAL, "tuple:", false, Node::NotAMachineReg, relocInfo::none }, // Tuple
{ Bad, T_ARRAY, "array:", false, Node::NotAMachineReg, relocInfo::none }, // Array
@@ -332,6 +333,8 @@
TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR );
TypeNarrowOop::BOTTOM = TypeNarrowOop::make( TypeInstPtr::BOTTOM );
+ TypeNarrowKlass::NULL_PTR = TypeNarrowKlass::make( TypePtr::NULL_PTR );
+
mreg2type[Op_Node] = Type::BOTTOM;
mreg2type[Op_Set ] = 0;
mreg2type[Op_RegN] = TypeNarrowOop::BOTTOM;
@@ -395,34 +398,36 @@
longpair[1] = TypeLong::LONG;
TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair);
- _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM;
- _const_basic_type[T_BOOLEAN] = TypeInt::BOOL;
- _const_basic_type[T_CHAR] = TypeInt::CHAR;
- _const_basic_type[T_BYTE] = TypeInt::BYTE;
- _const_basic_type[T_SHORT] = TypeInt::SHORT;
- _const_basic_type[T_INT] = TypeInt::INT;
- _const_basic_type[T_LONG] = TypeLong::LONG;
- _const_basic_type[T_FLOAT] = Type::FLOAT;
- _const_basic_type[T_DOUBLE] = Type::DOUBLE;
- _const_basic_type[T_OBJECT] = TypeInstPtr::BOTTOM;
- _const_basic_type[T_ARRAY] = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays
- _const_basic_type[T_VOID] = TypePtr::NULL_PTR; // reflection represents void this way
- _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs
- _const_basic_type[T_CONFLICT]= Type::BOTTOM; // why not?
-
- _zero_type[T_NARROWOOP] = TypeNarrowOop::NULL_PTR;
- _zero_type[T_BOOLEAN] = TypeInt::ZERO; // false == 0
- _zero_type[T_CHAR] = TypeInt::ZERO; // '\0' == 0
- _zero_type[T_BYTE] = TypeInt::ZERO; // 0x00 == 0
- _zero_type[T_SHORT] = TypeInt::ZERO; // 0x0000 == 0
- _zero_type[T_INT] = TypeInt::ZERO;
- _zero_type[T_LONG] = TypeLong::ZERO;
- _zero_type[T_FLOAT] = TypeF::ZERO;
- _zero_type[T_DOUBLE] = TypeD::ZERO;
- _zero_type[T_OBJECT] = TypePtr::NULL_PTR;
- _zero_type[T_ARRAY] = TypePtr::NULL_PTR; // null array is null oop
- _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null
- _zero_type[T_VOID] = Type::TOP; // the only void value is no value at all
+ _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM;
+ _const_basic_type[T_NARROWKLASS] = Type::BOTTOM;
+ _const_basic_type[T_BOOLEAN] = TypeInt::BOOL;
+ _const_basic_type[T_CHAR] = TypeInt::CHAR;
+ _const_basic_type[T_BYTE] = TypeInt::BYTE;
+ _const_basic_type[T_SHORT] = TypeInt::SHORT;
+ _const_basic_type[T_INT] = TypeInt::INT;
+ _const_basic_type[T_LONG] = TypeLong::LONG;
+ _const_basic_type[T_FLOAT] = Type::FLOAT;
+ _const_basic_type[T_DOUBLE] = Type::DOUBLE;
+ _const_basic_type[T_OBJECT] = TypeInstPtr::BOTTOM;
+ _const_basic_type[T_ARRAY] = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays
+ _const_basic_type[T_VOID] = TypePtr::NULL_PTR; // reflection represents void this way
+ _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs
+ _const_basic_type[T_CONFLICT] = Type::BOTTOM; // why not?
+
+ _zero_type[T_NARROWOOP] = TypeNarrowOop::NULL_PTR;
+ _zero_type[T_NARROWKLASS] = TypeNarrowKlass::NULL_PTR;
+ _zero_type[T_BOOLEAN] = TypeInt::ZERO; // false == 0
+ _zero_type[T_CHAR] = TypeInt::ZERO; // '\0' == 0
+ _zero_type[T_BYTE] = TypeInt::ZERO; // 0x00 == 0
+ _zero_type[T_SHORT] = TypeInt::ZERO; // 0x0000 == 0
+ _zero_type[T_INT] = TypeInt::ZERO;
+ _zero_type[T_LONG] = TypeLong::ZERO;
+ _zero_type[T_FLOAT] = TypeF::ZERO;
+ _zero_type[T_DOUBLE] = TypeD::ZERO;
+ _zero_type[T_OBJECT] = TypePtr::NULL_PTR;
+ _zero_type[T_ARRAY] = TypePtr::NULL_PTR; // null array is null oop
+ _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null
+ _zero_type[T_VOID] = Type::TOP; // the only void value is no value at all
// get_zero_type() should not happen for T_CONFLICT
_zero_type[T_CONFLICT]= NULL;
@@ -563,9 +568,14 @@
const Type* result = make_ptr()->meet(t->make_ptr());
return result->make_narrowoop();
}
+ if (isa_narrowklass() && t->isa_narrowklass()) {
+ const Type* result = make_ptr()->meet(t->make_ptr());
+ return result->make_narrowklass();
+ }
const Type *mt = xmeet(t);
if (isa_narrowoop() || t->isa_narrowoop()) return mt;
+ if (isa_narrowklass() || t->isa_narrowklass()) return mt;
#ifdef ASSERT
assert( mt == t->xmeet(this), "meet not commutative" );
const Type* dual_join = mt->_dual;
@@ -635,6 +645,9 @@
case NarrowOop:
return t->xmeet(this);
+ case NarrowKlass:
+ return t->xmeet(this);
+
case Bad: // Type check
default: // Bogus type not in lattice
typerr(t);
@@ -693,6 +706,7 @@
Bad, // Long - handled in v-call
Half, // Half
Bad, // NarrowOop - handled in v-call
+ Bad, // NarrowKlass - handled in v-call
Bad, // Tuple - handled in v-call
Bad, // Array - handled in v-call
@@ -756,6 +770,8 @@
dump2(d,1, st);
if (is_ptr_to_narrowoop()) {
st->print(" [narrow]");
+ } else if (is_ptr_to_narrowklass()) {
+ st->print(" [narrowklass]");
}
}
#endif
@@ -838,6 +854,7 @@
case MetadataPtr:
case KlassPtr:
case NarrowOop:
+ case NarrowKlass:
case Int:
case Long:
case DoubleTop:
@@ -955,6 +972,7 @@
case MetadataPtr:
case KlassPtr:
case NarrowOop:
+ case NarrowKlass:
case Int:
case Long:
case FloatTop:
@@ -1109,6 +1127,7 @@
case MetadataPtr:
case KlassPtr:
case NarrowOop:
+ case NarrowKlass:
case Long:
case FloatTop:
case FloatCon:
@@ -1366,6 +1385,7 @@
case MetadataPtr:
case KlassPtr:
case NarrowOop:
+ case NarrowKlass:
case Int:
case FloatTop:
case FloatCon:
@@ -2096,6 +2116,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -2350,17 +2371,18 @@
_const_oop(o), _klass(k),
_klass_is_exact(xk),
_is_ptr_to_narrowoop(false),
+ _is_ptr_to_narrowklass(false),
_instance_id(instance_id) {
#ifdef _LP64
- if (UseCompressedOops && _offset != 0) {
+ if (_offset != 0) {
if (_offset == oopDesc::klass_offset_in_bytes()) {
- _is_ptr_to_narrowoop = UseCompressedKlassPointers;
+ _is_ptr_to_narrowklass = UseCompressedKlassPointers;
} else if (klass() == NULL) {
// Array with unknown body type
assert(this->isa_aryptr(), "only arrays without klass");
- _is_ptr_to_narrowoop = true;
+ _is_ptr_to_narrowoop = UseCompressedOops;
} else if (this->isa_aryptr()) {
- _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() &&
+ _is_ptr_to_narrowoop = (UseCompressedOops && klass()->is_obj_array_klass() &&
_offset != arrayOopDesc::length_offset_in_bytes());
} else if (klass()->is_instance_klass()) {
ciInstanceKlass* ik = klass()->as_instance_klass();
@@ -2369,7 +2391,7 @@
// Perm objects don't use compressed references
} else if (_offset == OffsetBot || _offset == OffsetTop) {
// unsafe access
- _is_ptr_to_narrowoop = true;
+ _is_ptr_to_narrowoop = UseCompressedOops;
} else { // exclude unsafe ops
assert(this->isa_instptr(), "must be an instance ptr.");
@@ -2387,22 +2409,22 @@
ciField* field = k->get_field_by_offset(_offset, true);
assert(field != NULL, "missing field");
BasicType basic_elem_type = field->layout_type();
- _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
- basic_elem_type == T_ARRAY);
+ _is_ptr_to_narrowoop = UseCompressedOops && (basic_elem_type == T_OBJECT ||
+ basic_elem_type == T_ARRAY);
} else {
// Instance fields which contains a compressed oop references.
field = ik->get_field_by_offset(_offset, false);
if (field != NULL) {
BasicType basic_elem_type = field->layout_type();
- _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
- basic_elem_type == T_ARRAY);
+ _is_ptr_to_narrowoop = UseCompressedOops && (basic_elem_type == T_OBJECT ||
+ basic_elem_type == T_ARRAY);
} else if (klass()->equals(ciEnv::current()->Object_klass())) {
// Compile::find_alias_type() cast exactness on all types to verify
// that it does not affect alias type.
- _is_ptr_to_narrowoop = true;
+ _is_ptr_to_narrowoop = UseCompressedOops;
} else {
// Type for the copy start in LibraryCallKit::inline_native_clone().
- _is_ptr_to_narrowoop = true;
+ _is_ptr_to_narrowoop = UseCompressedOops;
}
}
}
@@ -2475,6 +2497,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -2925,6 +2948,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -3353,6 +3377,7 @@
case T_NARROWOOP:
etype = T_OBJECT;
break;
+ case T_NARROWKLASS:
case T_CONFLICT:
case T_ILLEGAL:
case T_VOID:
@@ -3425,6 +3450,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -3671,23 +3697,27 @@
//=============================================================================
-const TypeNarrowOop *TypeNarrowOop::BOTTOM;
-const TypeNarrowOop *TypeNarrowOop::NULL_PTR;
-
-
-const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) {
- return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons();
-}
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
-int TypeNarrowOop::hash(void) const {
+int TypeNarrowPtr::hash(void) const {
return _ptrtype->hash() + 7;
}
-
-bool TypeNarrowOop::eq( const Type *t ) const {
- const TypeNarrowOop* tc = t->isa_narrowoop();
+bool TypeNarrowPtr::singleton(void) const { // TRUE if type is a singleton
+ return _ptrtype->singleton();
+}
+
+bool TypeNarrowPtr::empty(void) const {
+ return _ptrtype->empty();
+}
+
+intptr_t TypeNarrowPtr::get_con() const {
+ return _ptrtype->get_con();
+}
+
+bool TypeNarrowPtr::eq( const Type *t ) const {
+ const TypeNarrowPtr* tc = isa_same_narrowptr(t);
if (tc != NULL) {
if (_ptrtype->base() != tc->_ptrtype->base()) {
return false;
@@ -3697,22 +3727,46 @@
return false;
}
-bool TypeNarrowOop::singleton(void) const { // TRUE if type is a singleton
- return _ptrtype->singleton();
-}
-
-bool TypeNarrowOop::empty(void) const {
- return _ptrtype->empty();
+const Type *TypeNarrowPtr::xdual() const { // Compute dual right now.
+ const TypePtr* odual = _ptrtype->dual()->is_ptr();
+ return make_same_narrowptr(odual);
+}
+
+
+const Type *TypeNarrowPtr::filter( const Type *kills ) const {
+ if (isa_same_narrowptr(kills)) {
+ const Type* ft =_ptrtype->filter(is_same_narrowptr(kills)->_ptrtype);
+ if (ft->empty())
+ return Type::TOP; // Canonical empty value
+ if (ft->isa_ptr()) {
+ return make_hash_same_narrowptr(ft->isa_ptr());
+ }
+ return ft;
+ } else if (kills->isa_ptr()) {
+ const Type* ft = _ptrtype->join(kills);
+ if (ft->empty())
+ return Type::TOP; // Canonical empty value
+ return ft;
+ } else {
+ return Type::TOP;
+ }
}
//------------------------------xmeet------------------------------------------
// Compute the MEET of two types. It returns a new Type object.
-const Type *TypeNarrowOop::xmeet( const Type *t ) const {
+const Type *TypeNarrowPtr::xmeet( const Type *t ) const {
// Perform a fast test for common case; meeting the same types together.
if( this == t ) return this; // Meeting same type-rep?
-
- // Current "this->_base" is OopPtr
+ if (t->base() == base()) {
+ const Type* result = _ptrtype->xmeet(t->make_ptr());
+ if (result->isa_ptr()) {
+ return make_hash_same_narrowptr(result->is_ptr());
+ }
+ return result;
+ }
+
+ // Current "this->_base" is NarrowKlass or NarrowOop
switch (t->base()) { // switch on original type
case Int: // Mixing ints & oops happens when javac
@@ -3730,20 +3784,14 @@
case AryPtr:
case MetadataPtr:
case KlassPtr:
+ case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
return this;
- case NarrowOop: {
- const Type* result = _ptrtype->xmeet(t->make_ptr());
- if (result->isa_ptr()) {
- return TypeNarrowOop::make(result->is_ptr());
- }
- return result;
- }
-
default: // All else is a mistake
typerr(t);
@@ -3752,42 +3800,40 @@
return this;
}
-const Type *TypeNarrowOop::xdual() const { // Compute dual right now.
- const TypePtr* odual = _ptrtype->dual()->is_ptr();
- return new TypeNarrowOop(odual);
-}
-
-const Type *TypeNarrowOop::filter( const Type *kills ) const {
- if (kills->isa_narrowoop()) {
- const Type* ft =_ptrtype->filter(kills->is_narrowoop()->_ptrtype);
- if (ft->empty())
- return Type::TOP; // Canonical empty value
- if (ft->isa_ptr()) {
- return make(ft->isa_ptr());
- }
- return ft;
- } else if (kills->isa_ptr()) {
- const Type* ft = _ptrtype->join(kills);
- if (ft->empty())
- return Type::TOP; // Canonical empty value
- return ft;
- } else {
- return Type::TOP;
- }
-}
-
-
-intptr_t TypeNarrowOop::get_con() const {
- return _ptrtype->get_con();
-}
+#ifndef PRODUCT
+void TypeNarrowPtr::dump2( Dict & d, uint depth, outputStream *st ) const {
+ _ptrtype->dump2(d, depth, st);
+}
+#endif
+
+const TypeNarrowOop *TypeNarrowOop::BOTTOM;
+const TypeNarrowOop *TypeNarrowOop::NULL_PTR;
+
+
+const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) {
+ return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons();
+}
+
#ifndef PRODUCT
void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const {
st->print("narrowoop: ");
- _ptrtype->dump2(d, depth, st);
+ TypeNarrowPtr::dump2(d, depth, st);
}
#endif
+const TypeNarrowKlass *TypeNarrowKlass::NULL_PTR;
+
+const TypeNarrowKlass* TypeNarrowKlass::make(const TypePtr* type) {
+ return (const TypeNarrowKlass*)(new TypeNarrowKlass(type))->hashcons();
+}
+
+#ifndef PRODUCT
+void TypeNarrowKlass::dump2( Dict & d, uint depth, outputStream *st ) const {
+ st->print("narrowklass: ");
+ TypeNarrowPtr::dump2(d, depth, st);
+}
+#endif
//------------------------------eq---------------------------------------------
@@ -3878,6 +3924,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -4169,6 +4216,7 @@
case DoubleCon:
case DoubleBot:
case NarrowOop:
+ case NarrowKlass:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
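In the TypeOopPtr constructor hunk above, the two flags are now computed independently: the klass-offset slot is marked ptr-to-narrowklass when UseCompressedKlassPointers is set, while ordinary oop fields are marked ptr-to-narrowoop only when UseCompressedOops is set, so either flag can be on without the other. A tiny standalone sketch of that split (hypothetical names; the real logic also consults field layouts and array element types):

#include <cassert>

struct Flags {
  bool UseCompressedOops;
  bool UseCompressedKlassPointers;
};

// Simplified: classify one offset inside an object as pointing at a
// compressed oop, a compressed klass, or neither.
void narrow_flags(const Flags& f, int offset, int klass_offset, bool is_oop_field,
                  bool& ptr_to_narrowoop, bool& ptr_to_narrowklass) {
  ptr_to_narrowoop   = false;
  ptr_to_narrowklass = false;
  if (offset == klass_offset) {
    ptr_to_narrowklass = f.UseCompressedKlassPointers;   // object header klass word
  } else if (is_oop_field) {
    ptr_to_narrowoop = f.UseCompressedOops;              // ordinary reference field
  }
}

int main() {
  Flags f{ /*UseCompressedOops*/ false, /*UseCompressedKlassPointers*/ true };
  bool noop, nklass;
  narrow_flags(f, 8, 8, false, noop, nklass);
  assert(!noop && nklass);     // klass word is narrow even with compressed oops off
  narrow_flags(f, 16, 8, true, noop, nklass);
  assert(!noop && !nklass);    // reference field stays full width in this config
  return 0;
}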
--- a/hotspot/src/share/vm/opto/type.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -48,7 +48,9 @@
class TypeF;
class TypeInt;
class TypeLong;
-class TypeNarrowOop;
+class TypeNarrowPtr;
+class TypeNarrowOop;
+class TypeNarrowKlass;
class TypeAry;
class TypeTuple;
class TypeVect;
@@ -81,6 +83,7 @@
Long, // Long integer range (lo-hi)
Half, // Placeholder half of doubleword
NarrowOop, // Compressed oop pointer
+ NarrowKlass, // Compressed klass pointer
Tuple, // Method signature or object layout
Array, // Array types
@@ -229,6 +232,7 @@
// Returns true if this pointer points at memory which contains a
// compressed oop references.
bool is_ptr_to_narrowoop() const;
+ bool is_ptr_to_narrowklass() const;
// Convenience access
float getf() const;
@@ -252,6 +256,8 @@
const TypeRawPtr *is_rawptr() const; // Asserts is rawptr
const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer
const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type
+ const TypeNarrowKlass *is_narrowklass() const; // compressed klass pointer
+  const TypeNarrowKlass *isa_narrowklass() const; // Returns NULL if not narrow klass type
const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type
const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer
const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr
@@ -278,6 +284,10 @@
// of this pointer type.
const TypeNarrowOop* make_narrowoop() const;
+ // Returns this compressed klass pointer or the equivalent
+ // compressed version of this pointer type.
+ const TypeNarrowKlass* make_narrowklass() const;
+
// Special test for register pressure heuristic
bool is_floatingpoint() const; // True if Float or Double base type
@@ -670,7 +680,7 @@
// Otherwise the _base will indicate which subset of pointers is affected,
// and the class will be inherited from.
class TypePtr : public Type {
- friend class TypeNarrowOop;
+ friend class TypeNarrowPtr;
public:
enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
protected:
@@ -781,6 +791,7 @@
// Does the type exclude subclasses of the klass? (Inexact == polymorphic.)
bool _klass_is_exact;
bool _is_ptr_to_narrowoop;
+ bool _is_ptr_to_narrowklass;
// If not InstanceTop or InstanceBot, indicates that this is
// a particular instance of this type which is distinct.
@@ -825,6 +836,7 @@
// Returns true if this pointer points at memory which contains a
// compressed oop references.
bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; }
+ bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; }
bool is_known_instance() const { return _instance_id > 0; }
int instance_id() const { return _instance_id; }
@@ -1122,22 +1134,21 @@
#endif
};
-//------------------------------TypeNarrowOop----------------------------------
-// A compressed reference to some kind of Oop. This type wraps around
-// a preexisting TypeOopPtr and forwards most of it's operations to
-// the underlying type. It's only real purpose is to track the
-// oopness of the compressed oop value when we expose the conversion
-// between the normal and the compressed form.
-class TypeNarrowOop : public Type {
+class TypeNarrowPtr : public Type {
protected:
const TypePtr* _ptrtype; // Could be TypePtr::NULL_PTR
- TypeNarrowOop( const TypePtr* ptrtype): Type(NarrowOop),
- _ptrtype(ptrtype) {
+ TypeNarrowPtr(TYPES t, const TypePtr* ptrtype): _ptrtype(ptrtype),
+ Type(t) {
assert(ptrtype->offset() == 0 ||
ptrtype->offset() == OffsetBot ||
ptrtype->offset() == OffsetTop, "no real offsets");
}
+
+ virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const = 0;
+ virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0;
+ virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0;
+ virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0;
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -1153,19 +1164,89 @@
virtual bool empty(void) const; // TRUE if type is vacuous
+ // returns the equivalent ptr type for this compressed pointer
+ const TypePtr *get_ptrtype() const {
+ return _ptrtype;
+ }
+
+#ifndef PRODUCT
+ virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+#endif
+};
+
+//------------------------------TypeNarrowOop----------------------------------
+// A compressed reference to some kind of Oop. This type wraps around
+// a preexisting TypeOopPtr and forwards most of it's operations to
+// the underlying type. It's only real purpose is to track the
+// oopness of the compressed oop value when we expose the conversion
+// between the normal and the compressed form.
+class TypeNarrowOop : public TypeNarrowPtr {
+protected:
+ TypeNarrowOop( const TypePtr* ptrtype): TypeNarrowPtr(NarrowOop, ptrtype) {
+ }
+
+ virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const {
+ return t->isa_narrowoop();
+ }
+
+ virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const {
+ return t->is_narrowoop();
+ }
+
+ virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const {
+ return new TypeNarrowOop(t);
+ }
+
+ virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const {
+ return (const TypeNarrowPtr*)((new TypeNarrowOop(t))->hashcons());
+ }
+
+public:
+
static const TypeNarrowOop *make( const TypePtr* type);
static const TypeNarrowOop* make_from_constant(ciObject* con, bool require_constant = false) {
return make(TypeOopPtr::make_from_constant(con, require_constant));
}
- // returns the equivalent ptr type for this compressed pointer
- const TypePtr *get_ptrtype() const {
- return _ptrtype;
+ static const TypeNarrowOop *BOTTOM;
+ static const TypeNarrowOop *NULL_PTR;
+
+#ifndef PRODUCT
+ virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+#endif
+};
+
+//------------------------------TypeNarrowKlass----------------------------------
+// A compressed reference to klass pointer. This type wraps around a
+// preexisting TypeKlassPtr and forwards most of its operations to
+// the underlying type.
+class TypeNarrowKlass : public TypeNarrowPtr {
+protected:
+ TypeNarrowKlass( const TypePtr* ptrtype): TypeNarrowPtr(NarrowKlass, ptrtype) {
}
- static const TypeNarrowOop *BOTTOM;
- static const TypeNarrowOop *NULL_PTR;
+ virtual const TypeNarrowPtr *isa_same_narrowptr(const Type *t) const {
+ return t->isa_narrowklass();
+ }
+
+ virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const {
+ return t->is_narrowklass();
+ }
+
+ virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const {
+ return new TypeNarrowKlass(t);
+ }
+
+ virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const {
+ return (const TypeNarrowPtr*)((new TypeNarrowKlass(t))->hashcons());
+ }
+
+public:
+ static const TypeNarrowKlass *make( const TypePtr* type);
+
+ // static const TypeNarrowKlass *BOTTOM;
+ static const TypeNarrowKlass *NULL_PTR;
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
@@ -1221,6 +1302,14 @@
#endif
}
+inline bool Type::is_ptr_to_narrowklass() const {
+#ifdef _LP64
+ return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowklass_nv());
+#else
+ return false;
+#endif
+}
+
inline float Type::getf() const {
assert( _base == FloatCon, "Not a FloatCon" );
return ((TypeF*)this)->_f;
@@ -1346,6 +1435,15 @@
return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL;
}
+inline const TypeNarrowKlass *Type::is_narrowklass() const {
+  assert(_base == NarrowKlass, "Not a narrow klass" ) ;
+ return (TypeNarrowKlass*)this;
+}
+
+inline const TypeNarrowKlass *Type::isa_narrowklass() const {
+ return (_base == NarrowKlass) ? (TypeNarrowKlass*)this : NULL;
+}
+
inline const TypeMetadataPtr *Type::is_metadataptr() const {
// MetadataPtr is the first and CPCachePtr the last
assert(_base == MetadataPtr, "Not a metadata pointer" ) ;
@@ -1367,7 +1465,8 @@
inline const TypePtr* Type::make_ptr() const {
return (_base == NarrowOop) ? is_narrowoop()->get_ptrtype() :
- (isa_ptr() ? is_ptr() : NULL);
+ ((_base == NarrowKlass) ? is_narrowklass()->get_ptrtype() :
+ (isa_ptr() ? is_ptr() : NULL));
}
inline const TypeOopPtr* Type::make_oopptr() const {
@@ -1379,6 +1478,11 @@
(isa_ptr() ? TypeNarrowOop::make(is_ptr()) : NULL);
}
+inline const TypeNarrowKlass* Type::make_narrowklass() const {
+ return (_base == NarrowKlass) ? is_narrowklass() :
+ (isa_ptr() ? TypeNarrowKlass::make(is_ptr()) : NULL);
+}
+
inline bool Type::is_floatingpoint() const {
if( (_base == FloatCon) || (_base == FloatBot) ||
(_base == DoubleCon) || (_base == DoubleBot) )
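The type.hpp hunk above factors the shared behaviour of TypeNarrowOop and TypeNarrowKlass into TypeNarrowPtr: operations such as xdual(), filter() and xmeet() are written once against the wrapped _ptrtype, and only the re-wrapping step is deferred to virtual hooks (make_same_narrowptr(), make_hash_same_narrowptr(), isa_same_narrowptr()). A small standalone sketch of that pattern with illustrative names, not the HotSpot classes:

#include <cassert>
#include <string>
#include <utility>

// The base class owns the algorithm; subclasses only say how to re-wrap a
// result in their own kind, mirroring the role of make_same_narrowptr().
struct NarrowPtrBase {
  std::string ptrtype;                          // stands in for the wrapped TypePtr
  explicit NarrowPtrBase(std::string p) : ptrtype(std::move(p)) {}
  virtual ~NarrowPtrBase() {}
  virtual NarrowPtrBase* make_same(const std::string& p) const = 0;
  NarrowPtrBase* dual() const {                 // shared implementation, one copy
    return make_same("dual(" + ptrtype + ")");
  }
};

struct NarrowOop : NarrowPtrBase {
  explicit NarrowOop(std::string p) : NarrowPtrBase(std::move(p)) {}
  NarrowPtrBase* make_same(const std::string& p) const { return new NarrowOop(p); }
};

struct NarrowKlass : NarrowPtrBase {
  explicit NarrowKlass(std::string p) : NarrowPtrBase(std::move(p)) {}
  NarrowPtrBase* make_same(const std::string& p) const { return new NarrowKlass(p); }
};

int main() {
  NarrowKlass k("klassptr");
  NarrowPtrBase* d = k.dual();
  assert(dynamic_cast<NarrowKlass*>(d) != 0);   // wrapper kind is preserved
  assert(d->ptrtype == "dual(klassptr)");
  delete d;
  return 0;
}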
--- a/hotspot/src/share/vm/prims/methodHandles.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -563,15 +563,6 @@
return Klass::cast(SystemDictionary::Class_klass())->java_mirror();
} else if (s == vmSymbols::string_signature()) {
return Klass::cast(SystemDictionary::String_klass())->java_mirror();
- } else {
- int len = s->utf8_length();
- if (s->byte_at(0) == 'L' && s->byte_at(len-1) == ';') {
- TempNewSymbol cname = SymbolTable::probe((const char*)&s->bytes()[1], len-2);
- if (cname == NULL) return NULL;
- Klass* wkk = SystemDictionary::find_well_known_klass(cname);
- if (wkk == NULL) return NULL;
- return Klass::cast(wkk)->java_mirror();
- }
}
}
return NULL;
--- a/hotspot/src/share/vm/prims/methodHandles.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -196,7 +196,27 @@
# include "methodHandles_ppc.hpp"
#endif
-
+ // Tracing
+ static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
+ static void trace_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid) {
+ if (TraceMethodHandles) {
+ const char* name = vmIntrinsics::name_at(iid);
+ if (*name == '_') name += 1;
+ const size_t len = strlen(name) + 50;
+ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
+ const char* suffix = "";
+ if (is_signature_polymorphic(iid)) {
+ if (is_signature_polymorphic_static(iid))
+ suffix = "/static";
+ else
+ suffix = "/private";
+ }
+ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
+ trace_method_handle(_masm, qname);
+ // Note: Don't free the allocated char array because it's used
+ // during runtime.
+ }
+ }
};
--- a/hotspot/src/share/vm/runtime/arguments.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -1423,10 +1423,9 @@
FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
} else {
// Turn on UseCompressedKlassPointers too
- // The compiler is broken for this so turn it on when the compiler is fixed.
- // if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
- // FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
- // }
+ if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
+ FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
+ }
// Set the ClassMetaspaceSize to something that will not need to be
// expanded, since it cannot be expanded.
if (UseCompressedKlassPointers && FLAG_IS_DEFAULT(ClassMetaspaceSize)) {
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -233,6 +233,7 @@
return_value = Handle(thread, result);
assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
if (TraceDeoptimization) {
+ ttyLocker ttyl;
tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
}
}
@@ -493,6 +494,7 @@
if (array->frames() > 1) {
if (VerifyStack && TraceDeoptimization) {
+ ttyLocker ttyl;
tty->print_cr("Deoptimizing method containing inlining");
}
}
@@ -573,6 +575,7 @@
#ifndef PRODUCT
if (TraceDeoptimization) {
+ ttyLocker ttyl;
tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
}
#endif
@@ -1322,9 +1325,9 @@
if (TraceDeoptimization) { // make noise on the tty
tty->print("Uncommon trap occurred in");
nm->method()->print_short_name(tty);
- tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
+ tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d",
fr.pc(),
- (int) os::current_thread_id(),
+ os::current_thread_id(),
trap_reason_name(reason),
trap_action_name(action),
unloaded_class_index);
--- a/hotspot/src/share/vm/runtime/globals.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -1000,9 +1000,6 @@
product(bool, ClassUnloading, true, \
"Do unloading of classes") \
\
- diagnostic(bool, LinkWellKnownClasses, false, \
- "Resolve a well known class as soon as its name is seen") \
- \
develop(bool, DisableStartThread, false, \
"Disable starting of additional Java threads " \
"(for debugging only)") \
@@ -2329,7 +2326,7 @@
develop(bool, CITimeEach, false, \
"display timing information after each successful compilation") \
\
- develop(bool, CICountOSR, true, \
+ develop(bool, CICountOSR, false, \
"use a separate counter when assigning ids to osr compilations") \
\
develop(bool, CICompileNatives, true, \
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -421,6 +421,7 @@
case T_ARRAY:
case T_OBJECT:
case T_NARROWOOP:
+ case T_NARROWKLASS:
case T_ADDRESS:
// Currently unsupported
return NULL;
--- a/hotspot/src/share/vm/runtime/thread.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -2583,6 +2583,12 @@
StackFrameStream fst(this, UseBiasedLocking);
for(; !fst.is_done(); fst.next()) {
if (fst.current()->should_be_deoptimized()) {
+ if (LogCompilation && xtty != NULL) {
+ nmethod* nm = fst.current()->cb()->as_nmethod_or_null();
+ xtty->elem("deoptimized thread='" UINTX_FORMAT "' compile_id='%d'",
+ this->name(), nm != NULL ? nm->compile_id() : -1);
+ }
+
Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
}
}
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -454,6 +454,8 @@
static_field(Universe, _narrow_oop._base, address) \
static_field(Universe, _narrow_oop._shift, int) \
static_field(Universe, _narrow_oop._use_implicit_null_checks, bool) \
+ static_field(Universe, _narrow_klass._base, address) \
+ static_field(Universe, _narrow_klass._shift, int) \
\
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
@@ -1727,6 +1729,8 @@
declare_c2_type(CMoveNNode, CMoveNode) \
declare_c2_type(EncodePNode, TypeNode) \
declare_c2_type(DecodeNNode, TypeNode) \
+ declare_c2_type(EncodePKlassNode, TypeNode) \
+ declare_c2_type(DecodeNKlassNode, TypeNode) \
declare_c2_type(ConstraintCastNode, TypeNode) \
declare_c2_type(CastIINode, ConstraintCastNode) \
declare_c2_type(CastPPNode, ConstraintCastNode) \
@@ -1823,6 +1827,7 @@
declare_c2_type(StoreDNode, StoreNode) \
declare_c2_type(StorePNode, StoreNode) \
declare_c2_type(StoreNNode, StoreNode) \
+ declare_c2_type(StoreNKlassNode, StoreNode) \
declare_c2_type(StoreCMNode, StoreNode) \
declare_c2_type(LoadPLockedNode, LoadPNode) \
declare_c2_type(SCMemProjNode, ProjNode) \
--- a/hotspot/src/share/vm/utilities/globalDefinitions.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -111,11 +111,12 @@
case T_DOUBLE:
case T_LONG:
case T_OBJECT:
- case T_ADDRESS: // random raw pointer
- case T_METADATA: // metadata pointer
- case T_NARROWOOP: // compressed pointer
- case T_CONFLICT: // might as well support a bottom type
- case T_VOID: // padding or other unaddressed word
+ case T_ADDRESS: // random raw pointer
+ case T_METADATA: // metadata pointer
+ case T_NARROWOOP: // compressed pointer
+ case T_NARROWKLASS: // compressed klass pointer
+ case T_CONFLICT: // might as well support a bottom type
+ case T_VOID: // padding or other unaddressed word
// layout type must map to itself
assert(vt == ft, "");
break;
@@ -179,7 +180,7 @@
// Map BasicType to signature character
-char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0, 0};
+char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0, 0, 0};
// Map BasicType to Java type name
const char* type2name_tab[T_CONFLICT+1] = {
@@ -198,6 +199,7 @@
"*address*",
"*narrowoop*",
"*metadata*",
+ "*narrowklass*",
"*conflict*"
};
@@ -213,7 +215,7 @@
// Map BasicType to size in words
-int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, -1};
+int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, -1};
BasicType type2field[T_CONFLICT+1] = {
(BasicType)0, // 0,
@@ -234,7 +236,8 @@
T_ADDRESS, // T_ADDRESS = 15,
T_NARROWOOP, // T_NARROWOOP= 16,
T_METADATA, // T_METADATA = 17,
- T_CONFLICT // T_CONFLICT = 18,
+ T_NARROWKLASS, // T_NARROWKLASS = 18,
+ T_CONFLICT // T_CONFLICT = 19,
};
@@ -257,30 +260,32 @@
T_ADDRESS, // T_ADDRESS = 15,
T_NARROWOOP, // T_NARROWOOP = 16,
T_METADATA, // T_METADATA = 17,
- T_CONFLICT // T_CONFLICT = 18,
+ T_NARROWKLASS, // T_NARROWKLASS = 18,
+ T_CONFLICT // T_CONFLICT = 19,
};
int _type2aelembytes[T_CONFLICT+1] = {
- 0, // 0
- 0, // 1
- 0, // 2
- 0, // 3
- T_BOOLEAN_aelem_bytes, // T_BOOLEAN = 4,
- T_CHAR_aelem_bytes, // T_CHAR = 5,
- T_FLOAT_aelem_bytes, // T_FLOAT = 6,
- T_DOUBLE_aelem_bytes, // T_DOUBLE = 7,
- T_BYTE_aelem_bytes, // T_BYTE = 8,
- T_SHORT_aelem_bytes, // T_SHORT = 9,
- T_INT_aelem_bytes, // T_INT = 10,
- T_LONG_aelem_bytes, // T_LONG = 11,
- T_OBJECT_aelem_bytes, // T_OBJECT = 12,
- T_ARRAY_aelem_bytes, // T_ARRAY = 13,
- 0, // T_VOID = 14,
- T_OBJECT_aelem_bytes, // T_ADDRESS = 15,
- T_NARROWOOP_aelem_bytes,// T_NARROWOOP= 16,
- T_OBJECT_aelem_bytes, // T_METADATA = 17,
- 0 // T_CONFLICT = 18,
+ 0, // 0
+ 0, // 1
+ 0, // 2
+ 0, // 3
+ T_BOOLEAN_aelem_bytes, // T_BOOLEAN = 4,
+ T_CHAR_aelem_bytes, // T_CHAR = 5,
+ T_FLOAT_aelem_bytes, // T_FLOAT = 6,
+ T_DOUBLE_aelem_bytes, // T_DOUBLE = 7,
+ T_BYTE_aelem_bytes, // T_BYTE = 8,
+ T_SHORT_aelem_bytes, // T_SHORT = 9,
+ T_INT_aelem_bytes, // T_INT = 10,
+ T_LONG_aelem_bytes, // T_LONG = 11,
+ T_OBJECT_aelem_bytes, // T_OBJECT = 12,
+ T_ARRAY_aelem_bytes, // T_ARRAY = 13,
+ 0, // T_VOID = 14,
+ T_OBJECT_aelem_bytes, // T_ADDRESS = 15,
+ T_NARROWOOP_aelem_bytes, // T_NARROWOOP= 16,
+ T_OBJECT_aelem_bytes, // T_METADATA = 17,
+ T_NARROWKLASS_aelem_bytes, // T_NARROWKLASS= 18,
+ 0 // T_CONFLICT = 19,
};
#ifdef ASSERT
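Every table in this file is indexed by BasicType, so inserting T_NARROWKLASS = 18 and moving T_CONFLICT to 19 forces each of type2char_tab, type2size, type2field, type2wfield and _type2aelembytes to grow by exactly one slot, as the hunks above do. A minimal standalone check of that invariant (illustrative subset of the enum, not the full HotSpot definition):

// Build with a C++11 compiler; static_assert catches a table that was not
// widened when the enum gained T_NARROWKLASS.
enum BasicType { T_BOOLEAN = 4, T_NARROWOOP = 16, T_METADATA = 17,
                 T_NARROWKLASS = 18, T_CONFLICT = 19 };

static const int type2size[T_CONFLICT + 1] =
  { -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, -1 };

static_assert(sizeof(type2size) / sizeof(type2size[0]) == T_CONFLICT + 1,
              "one entry per BasicType, including T_NARROWKLASS");

int main() {
  return type2size[T_NARROWKLASS] == 1 ? 0 : 1;   // a narrow klass occupies one word
}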
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 12 09:22:52 2012 -0700
@@ -347,6 +347,14 @@
extern int LogMinObjAlignment;
extern int LogMinObjAlignmentInBytes;
+const int LogKlassAlignmentInBytes = 3;
+const int LogKlassAlignment = LogKlassAlignmentInBytes - LogHeapWordSize;
+const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes;
+const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize;
+
+// Klass encoding metaspace max size
+const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
+
// Machine dependent stuff
#ifdef TARGET_ARCH_x86
@@ -481,22 +489,23 @@
// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
enum BasicType {
- T_BOOLEAN = 4,
- T_CHAR = 5,
- T_FLOAT = 6,
- T_DOUBLE = 7,
- T_BYTE = 8,
- T_SHORT = 9,
- T_INT = 10,
- T_LONG = 11,
- T_OBJECT = 12,
- T_ARRAY = 13,
- T_VOID = 14,
- T_ADDRESS = 15,
- T_NARROWOOP= 16,
- T_METADATA = 17,
- T_CONFLICT = 18, // for stack value type with conflicting contents
- T_ILLEGAL = 99
+ T_BOOLEAN = 4,
+ T_CHAR = 5,
+ T_FLOAT = 6,
+ T_DOUBLE = 7,
+ T_BYTE = 8,
+ T_SHORT = 9,
+ T_INT = 10,
+ T_LONG = 11,
+ T_OBJECT = 12,
+ T_ARRAY = 13,
+ T_VOID = 14,
+ T_ADDRESS = 15,
+ T_NARROWOOP = 16,
+ T_METADATA = 17,
+ T_NARROWKLASS = 18,
+ T_CONFLICT = 19, // for stack value type with conflicting contents
+ T_ILLEGAL = 99
};
inline bool is_java_primitive(BasicType t) {
@@ -544,18 +553,19 @@
// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
enum BasicTypeSize {
- T_BOOLEAN_size = 1,
- T_CHAR_size = 1,
- T_FLOAT_size = 1,
- T_DOUBLE_size = 2,
- T_BYTE_size = 1,
- T_SHORT_size = 1,
- T_INT_size = 1,
- T_LONG_size = 2,
- T_OBJECT_size = 1,
- T_ARRAY_size = 1,
- T_NARROWOOP_size = 1,
- T_VOID_size = 0
+ T_BOOLEAN_size = 1,
+ T_CHAR_size = 1,
+ T_FLOAT_size = 1,
+ T_DOUBLE_size = 2,
+ T_BYTE_size = 1,
+ T_SHORT_size = 1,
+ T_INT_size = 1,
+ T_LONG_size = 2,
+ T_OBJECT_size = 1,
+ T_ARRAY_size = 1,
+ T_NARROWOOP_size = 1,
+ T_NARROWKLASS_size = 1,
+ T_VOID_size = 0
};
@@ -567,23 +577,24 @@
// size in bytes
enum ArrayElementSize {
- T_BOOLEAN_aelem_bytes = 1,
- T_CHAR_aelem_bytes = 2,
- T_FLOAT_aelem_bytes = 4,
- T_DOUBLE_aelem_bytes = 8,
- T_BYTE_aelem_bytes = 1,
- T_SHORT_aelem_bytes = 2,
- T_INT_aelem_bytes = 4,
- T_LONG_aelem_bytes = 8,
+ T_BOOLEAN_aelem_bytes = 1,
+ T_CHAR_aelem_bytes = 2,
+ T_FLOAT_aelem_bytes = 4,
+ T_DOUBLE_aelem_bytes = 8,
+ T_BYTE_aelem_bytes = 1,
+ T_SHORT_aelem_bytes = 2,
+ T_INT_aelem_bytes = 4,
+ T_LONG_aelem_bytes = 8,
#ifdef _LP64
- T_OBJECT_aelem_bytes = 8,
- T_ARRAY_aelem_bytes = 8,
+ T_OBJECT_aelem_bytes = 8,
+ T_ARRAY_aelem_bytes = 8,
#else
- T_OBJECT_aelem_bytes = 4,
- T_ARRAY_aelem_bytes = 4,
+ T_OBJECT_aelem_bytes = 4,
+ T_ARRAY_aelem_bytes = 4,
#endif
- T_NARROWOOP_aelem_bytes = 4,
- T_VOID_aelem_bytes = 0
+ T_NARROWOOP_aelem_bytes = 4,
+ T_NARROWKLASS_aelem_bytes = 4,
+ T_VOID_aelem_bytes = 0
};
extern int _type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element
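The new constants above bound the compressed-klass encoding: narrow klass values are 32 bits and Klass structures are aligned to 2^LogKlassAlignmentInBytes = 8 bytes, so the largest addressable class-metadata range is (max_juint + 1) << 3 bytes, i.e. 32 GB. A quick standalone check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const int      LogKlassAlignmentInBytes = 3;             // 8-byte Klass alignment
  const uint64_t max_juint                = 0xFFFFFFFFull; // largest 32-bit narrow value
  const uint64_t KlassEncodingMetaspaceMax =
      (max_juint + 1) << LogKlassAlignmentInBytes;
  assert(KlassEncodingMetaspaceMax == 32ull * 1024 * 1024 * 1024);  // 32 GB
  return 0;
}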
--- a/hotspot/src/share/vm/utilities/ostream.cpp Tue Oct 09 10:09:34 2012 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp Fri Oct 12 09:22:52 2012 -0700
@@ -759,7 +759,7 @@
if (has_log) {
_log_file->bol();
// output a hint where this output is coming from:
- _log_file->print_cr("<writer thread='"INTX_FORMAT"'/>", writer_id);
+ _log_file->print_cr("<writer thread='" UINTX_FORMAT "'/>", writer_id);
}
_last_writer = writer_id;
}